repo | path | func_name | original_string | language | code | code_tokens | docstring | docstring_tokens | sha | url | partition | idx
|---|---|---|---|---|---|---|---|---|---|---|---|---|
eventable/vobject | docs/build/lib/vobject/icalendar.py | VAlarm.generateImplicitParameters | def generateImplicitParameters(obj):
"""
Create default ACTION and TRIGGER if they're not set.
"""
try:
obj.action
except AttributeError:
obj.add('action').value = 'AUDIO'
try:
obj.trigger
except AttributeError:
obj.add('trigger').value = datetime.timedelta(0) | python | def generateImplicitParameters(obj):
"""
Create default ACTION and TRIGGER if they're not set.
"""
try:
obj.action
except AttributeError:
obj.add('action').value = 'AUDIO'
try:
obj.trigger
except AttributeError:
obj.add('trigger').value = datetime.timedelta(0) | [
"def",
"generateImplicitParameters",
"(",
"obj",
")",
":",
"try",
":",
"obj",
".",
"action",
"except",
"AttributeError",
":",
"obj",
".",
"add",
"(",
"'action'",
")",
".",
"value",
"=",
"'AUDIO'",
"try",
":",
"obj",
".",
"trigger",
"except",
"AttributeError",
":",
"obj",
".",
"add",
"(",
"'trigger'",
")",
".",
"value",
"=",
"datetime",
".",
"timedelta",
"(",
"0",
")"
] | Create default ACTION and TRIGGER if they're not set. | [
"Create",
"default",
"ACTION",
"and",
"TRIGGER",
"if",
"they",
"re",
"not",
"set",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1208-L1219 | train | 234,900 |
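The function above leans on vobject's convention that accessing a missing child raises `AttributeError`. A minimal standalone sketch of that default-filling pattern, using a hypothetical `Alarm` stub in place of a real vobject component:

```python
import datetime

class Alarm:
    """Hypothetical stand-in for a vobject VALARM component."""
    def __init__(self):
        self._children = {}

    def __getattr__(self, name):
        # Called only when normal lookup fails, mirroring vobject's
        # missing-child behavior.
        try:
            return self._children[name]
        except KeyError:
            raise AttributeError(name)

    def add(self, name):
        child = type("ContentLine", (), {"value": None})()
        self._children[name] = child
        return child

def generate_implicit_parameters(obj):
    try:
        obj.action
    except AttributeError:
        obj.add("action").value = "AUDIO"
    try:
        obj.trigger
    except AttributeError:
        obj.add("trigger").value = datetime.timedelta(0)

alarm = Alarm()
generate_implicit_parameters(alarm)
print(alarm.action.value, alarm.trigger.value)  # AUDIO 0:00:00
```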
eventable/vobject | docs/build/lib/vobject/icalendar.py | Duration.transformToNative | def transformToNative(obj):
"""
Turn obj.value into a datetime.timedelta.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value=obj.value
if obj.value == '':
return obj
else:
deltalist=stringToDurations(obj.value)
# When can DURATION have multiple durations? For now:
if len(deltalist) == 1:
obj.value = deltalist[0]
return obj
else:
raise ParseError("DURATION must have a single duration string.") | python | def transformToNative(obj):
"""
Turn obj.value into a datetime.timedelta.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value=obj.value
if obj.value == '':
return obj
else:
deltalist=stringToDurations(obj.value)
# When can DURATION have multiple durations? For now:
if len(deltalist) == 1:
obj.value = deltalist[0]
return obj
else:
raise ParseError("DURATION must have a single duration string.") | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"obj",
".",
"value",
"=",
"obj",
".",
"value",
"if",
"obj",
".",
"value",
"==",
"''",
":",
"return",
"obj",
"else",
":",
"deltalist",
"=",
"stringToDurations",
"(",
"obj",
".",
"value",
")",
"# When can DURATION have multiple durations? For now:",
"if",
"len",
"(",
"deltalist",
")",
"==",
"1",
":",
"obj",
".",
"value",
"=",
"deltalist",
"[",
"0",
"]",
"return",
"obj",
"else",
":",
"raise",
"ParseError",
"(",
"\"DURATION must have a single duration string.\"",
")"
] | Turn obj.value into a datetime.timedelta. | [
"Turn",
"obj",
".",
"value",
"into",
"a",
"datetime",
".",
"timedelta",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1332-L1349 | train | 234,901 |
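`stringToDurations` is internal to vobject and not shown in this dump. As a rough sketch of what an RFC 2445 DURATION string maps to, here is a simplified standalone parser; it handles only the common `PnDTnHnMnS` form and ignores weeks, signs, and comma-separated lists, which the real helper supports:

```python
import re
from datetime import timedelta

_DURATION = re.compile(
    r"P(?:(?P<days>\d+)D)?"
    r"(?:T(?:(?P<hours>\d+)H)?(?:(?P<minutes>\d+)M)?(?:(?P<seconds>\d+)S)?)?$"
)

def parse_duration(text):
    """Parse a simple RFC 2445 DURATION such as 'P1DT2H30M'."""
    match = _DURATION.match(text)
    if match is None:
        raise ValueError("not a DURATION: %r" % text)
    parts = {k: int(v) for k, v in match.groupdict().items() if v is not None}
    return timedelta(**parts)

print(parse_duration("P1DT2H30M"))  # 1 day, 2:30:00
print(parse_duration("PT15M"))      # 0:15:00
```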
eventable/vobject | docs/build/lib/vobject/icalendar.py | Duration.transformFromNative | def transformFromNative(obj):
"""
Replace the datetime.timedelta in obj.value with an RFC2445 string.
"""
if not obj.isNative:
return obj
obj.isNative = False
obj.value = timedeltaToString(obj.value)
return obj | python | def transformFromNative(obj):
"""
Replace the datetime.timedelta in obj.value with an RFC2445 string.
"""
if not obj.isNative:
return obj
obj.isNative = False
obj.value = timedeltaToString(obj.value)
return obj | [
"def",
"transformFromNative",
"(",
"obj",
")",
":",
"if",
"not",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"False",
"obj",
".",
"value",
"=",
"timedeltaToString",
"(",
"obj",
".",
"value",
")",
"return",
"obj"
] | Replace the datetime.timedelta in obj.value with an RFC2445 string. | [
"Replace",
"the",
"datetime",
".",
"timedelta",
"in",
"obj",
".",
"value",
"with",
"an",
"RFC2445",
"string",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1352-L1360 | train | 234,902 |
eventable/vobject | docs/build/lib/vobject/icalendar.py | Trigger.transformToNative | def transformToNative(obj):
"""
Turn obj.value into a timedelta or datetime.
"""
if obj.isNative:
return obj
value = getattr(obj, 'value_param', 'DURATION').upper()
if hasattr(obj, 'value_param'):
del obj.value_param
if obj.value == '':
obj.isNative = True
return obj
elif value == 'DURATION':
try:
return Duration.transformToNative(obj)
except ParseError:
logger.warning("TRIGGER not recognized as DURATION, trying "
"DATE-TIME, because iCal sometimes exports "
"DATE-TIMEs without setting VALUE=DATE-TIME")
try:
obj.isNative = False
dt = DateTimeBehavior.transformToNative(obj)
return dt
except:
msg = "TRIGGER with no VALUE not recognized as DURATION " \
"or as DATE-TIME"
raise ParseError(msg)
elif value == 'DATE-TIME':
# TRIGGERs with DATE-TIME values must be in UTC, we could validate
# that fact, for now we take it on faith.
return DateTimeBehavior.transformToNative(obj)
else:
raise ParseError("VALUE must be DURATION or DATE-TIME") | python | def transformToNative(obj):
"""
Turn obj.value into a timedelta or datetime.
"""
if obj.isNative:
return obj
value = getattr(obj, 'value_param', 'DURATION').upper()
if hasattr(obj, 'value_param'):
del obj.value_param
if obj.value == '':
obj.isNative = True
return obj
elif value == 'DURATION':
try:
return Duration.transformToNative(obj)
except ParseError:
logger.warning("TRIGGER not recognized as DURATION, trying "
"DATE-TIME, because iCal sometimes exports "
"DATE-TIMEs without setting VALUE=DATE-TIME")
try:
obj.isNative = False
dt = DateTimeBehavior.transformToNative(obj)
return dt
except:
msg = "TRIGGER with no VALUE not recognized as DURATION " \
"or as DATE-TIME"
raise ParseError(msg)
elif value == 'DATE-TIME':
# TRIGGERs with DATE-TIME values must be in UTC, we could validate
# that fact, for now we take it on faith.
return DateTimeBehavior.transformToNative(obj)
else:
raise ParseError("VALUE must be DURATION or DATE-TIME") | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"value",
"=",
"getattr",
"(",
"obj",
",",
"'value_param'",
",",
"'DURATION'",
")",
".",
"upper",
"(",
")",
"if",
"hasattr",
"(",
"obj",
",",
"'value_param'",
")",
":",
"del",
"obj",
".",
"value_param",
"if",
"obj",
".",
"value",
"==",
"''",
":",
"obj",
".",
"isNative",
"=",
"True",
"return",
"obj",
"elif",
"value",
"==",
"'DURATION'",
":",
"try",
":",
"return",
"Duration",
".",
"transformToNative",
"(",
"obj",
")",
"except",
"ParseError",
":",
"logger",
".",
"warning",
"(",
"\"TRIGGER not recognized as DURATION, trying \"",
"\"DATE-TIME, because iCal sometimes exports \"",
"\"DATE-TIMEs without setting VALUE=DATE-TIME\"",
")",
"try",
":",
"obj",
".",
"isNative",
"=",
"False",
"dt",
"=",
"DateTimeBehavior",
".",
"transformToNative",
"(",
"obj",
")",
"return",
"dt",
"except",
":",
"msg",
"=",
"\"TRIGGER with no VALUE not recognized as DURATION \"",
"\"or as DATE-TIME\"",
"raise",
"ParseError",
"(",
"msg",
")",
"elif",
"value",
"==",
"'DATE-TIME'",
":",
"# TRIGGERs with DATE-TIME values must be in UTC, we could validate",
"# that fact, for now we take it on faith.",
"return",
"DateTimeBehavior",
".",
"transformToNative",
"(",
"obj",
")",
"else",
":",
"raise",
"ParseError",
"(",
"\"VALUE must be DURATION or DATE-TIME\"",
")"
] | Turn obj.value into a timedelta or datetime. | [
"Turn",
"obj",
".",
"value",
"into",
"a",
"timedelta",
"or",
"datetime",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1374-L1406 | train | 234,903 |
eventable/vobject | docs/build/lib/vobject/icalendar.py | PeriodBehavior.transformToNative | def transformToNative(obj):
"""
Convert comma separated periods into tuples.
"""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
obj.value = [stringToPeriod(x, tzinfo) for x in obj.value.split(",")]
return obj | python | def transformToNative(obj):
"""
Convert comma separated periods into tuples.
"""
if obj.isNative:
return obj
obj.isNative = True
if obj.value == '':
obj.value = []
return obj
tzinfo = getTzid(getattr(obj, 'tzid_param', None))
obj.value = [stringToPeriod(x, tzinfo) for x in obj.value.split(",")]
return obj | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"if",
"obj",
".",
"value",
"==",
"''",
":",
"obj",
".",
"value",
"=",
"[",
"]",
"return",
"obj",
"tzinfo",
"=",
"getTzid",
"(",
"getattr",
"(",
"obj",
",",
"'tzid_param'",
",",
"None",
")",
")",
"obj",
".",
"value",
"=",
"[",
"stringToPeriod",
"(",
"x",
",",
"tzinfo",
")",
"for",
"x",
"in",
"obj",
".",
"value",
".",
"split",
"(",
"\",\"",
")",
"]",
"return",
"obj"
] | Convert comma separated periods into tuples. | [
"Convert",
"comma",
"separated",
"periods",
"into",
"tuples",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1428-L1440 | train | 234,904 |
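`stringToPeriod` is likewise internal. A PERIOD value is `start/end` or `start/duration`, and repeated periods are comma-separated, as the split in the function above assumes. A sketch of just the splitting logic, restricted to the explicit `start/end` form in basic UTC format (real code must also handle the duration form and time zones):

```python
from datetime import datetime

def parse_period(text):
    """Parse the explicit 'start/end' PERIOD form (basic UTC format only)."""
    start_text, end_text = text.split("/")
    fmt = "%Y%m%dT%H%M%SZ"
    return (datetime.strptime(start_text, fmt),
            datetime.strptime(end_text, fmt))

value = "19970101T180000Z/19970102T070000Z,19970109T180000Z/19970109T190000Z"
print([parse_period(x) for x in value.split(",")])
```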
eventable/vobject | docs/build/lib/vobject/icalendar.py | PeriodBehavior.transformFromNative | def transformFromNative(cls, obj):
"""
Convert the list of tuples in obj.value to strings.
"""
if obj.isNative:
obj.isNative = False
transformed = []
for tup in obj.value:
transformed.append(periodToString(tup, cls.forceUTC))
if len(transformed) > 0:
tzid = TimezoneComponent.registerTzinfo(tup[0].tzinfo)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
obj.value = ','.join(transformed)
return obj | python | def transformFromNative(cls, obj):
"""
Convert the list of tuples in obj.value to strings.
"""
if obj.isNative:
obj.isNative = False
transformed = []
for tup in obj.value:
transformed.append(periodToString(tup, cls.forceUTC))
if len(transformed) > 0:
tzid = TimezoneComponent.registerTzinfo(tup[0].tzinfo)
if not cls.forceUTC and tzid is not None:
obj.tzid_param = tzid
obj.value = ','.join(transformed)
return obj | [
"def",
"transformFromNative",
"(",
"cls",
",",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"obj",
".",
"isNative",
"=",
"False",
"transformed",
"=",
"[",
"]",
"for",
"tup",
"in",
"obj",
".",
"value",
":",
"transformed",
".",
"append",
"(",
"periodToString",
"(",
"tup",
",",
"cls",
".",
"forceUTC",
")",
")",
"if",
"len",
"(",
"transformed",
")",
">",
"0",
":",
"tzid",
"=",
"TimezoneComponent",
".",
"registerTzinfo",
"(",
"tup",
"[",
"0",
"]",
".",
"tzinfo",
")",
"if",
"not",
"cls",
".",
"forceUTC",
"and",
"tzid",
"is",
"not",
"None",
":",
"obj",
".",
"tzid_param",
"=",
"tzid",
"obj",
".",
"value",
"=",
"','",
".",
"join",
"(",
"transformed",
")",
"return",
"obj"
] | Convert the list of tuples in obj.value to strings. | [
"Convert",
"the",
"list",
"of",
"tuples",
"in",
"obj",
".",
"value",
"to",
"strings",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/icalendar.py#L1443-L1459 | train | 234,905 |
eventable/vobject | vobject/vcard.py | serializeFields | def serializeFields(obj, order=None):
"""
Turn an object's fields into a ';' and ',' separated string.
If order is None, obj should be a list; backslash-escape each field and
return a ';' separated string.
"""
fields = []
if order is None:
fields = [backslashEscape(val) for val in obj]
else:
for field in order:
escapedValueList = [backslashEscape(val) for val in
toList(getattr(obj, field))]
fields.append(','.join(escapedValueList))
return ';'.join(fields) | python | def serializeFields(obj, order=None):
"""
Turn an object's fields into a ';' and ',' separated string.
If order is None, obj should be a list; backslash-escape each field and
return a ';' separated string.
"""
fields = []
if order is None:
fields = [backslashEscape(val) for val in obj]
else:
for field in order:
escapedValueList = [backslashEscape(val) for val in
toList(getattr(obj, field))]
fields.append(','.join(escapedValueList))
return ';'.join(fields) | [
"def",
"serializeFields",
"(",
"obj",
",",
"order",
"=",
"None",
")",
":",
"fields",
"=",
"[",
"]",
"if",
"order",
"is",
"None",
":",
"fields",
"=",
"[",
"backslashEscape",
"(",
"val",
")",
"for",
"val",
"in",
"obj",
"]",
"else",
":",
"for",
"field",
"in",
"order",
":",
"escapedValueList",
"=",
"[",
"backslashEscape",
"(",
"val",
")",
"for",
"val",
"in",
"toList",
"(",
"getattr",
"(",
"obj",
",",
"field",
")",
")",
"]",
"fields",
".",
"append",
"(",
"','",
".",
"join",
"(",
"escapedValueList",
")",
")",
"return",
"';'",
".",
"join",
"(",
"fields",
")"
] | Turn an object's fields into a ';' and ',' separated string.
If order is None, obj should be a list; backslash-escape each field and
return a ';' separated string. | [
"Turn",
"an",
"object",
"s",
"fields",
"into",
"a",
";",
"and",
"seperated",
"string",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L264-L279 | train | 234,906 |
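`backslashEscape` and `toList` are vobject helpers not included in this dump. A self-contained approximation with stand-in helpers, enough to see the two-level ';'/',' layering:

```python
def backslash_escape(value):
    """Stand-in for vobject's backslashEscape helper."""
    for char in ("\\", ";", ","):
        value = value.replace(char, "\\" + char)
    return value.replace("\n", "\\n")

def serialize_fields(fields):
    """One ';'-separated field per entry; list entries are ','-joined."""
    out = []
    for field in fields:
        if isinstance(field, (list, tuple)):
            out.append(",".join(backslash_escape(v) for v in field))
        else:
            out.append(backslash_escape(field))
    return ";".join(out)

print(serialize_fields(["Doe", "John", ["Jr.", "Esq."]]))  # Doe;John;Jr.,Esq.
```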
eventable/vobject | vobject/vcard.py | Address.toString | def toString(val, join_char='\n'):
"""
Turn a string or array value into a string.
"""
if type(val) in (list, tuple):
return join_char.join(val)
return val | python | def toString(val, join_char='\n'):
"""
Turn a string or array value into a string.
"""
if type(val) in (list, tuple):
return join_char.join(val)
return val | [
"def",
"toString",
"(",
"val",
",",
"join_char",
"=",
"'\\n'",
")",
":",
"if",
"type",
"(",
"val",
")",
"in",
"(",
"list",
",",
"tuple",
")",
":",
"return",
"join_char",
".",
"join",
"(",
"val",
")",
"return",
"val"
] | Turn a string or array value into a string. | [
"Turn",
"a",
"string",
"or",
"array",
"value",
"into",
"a",
"string",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L75-L81 | train | 234,907 |
eventable/vobject | vobject/vcard.py | NameBehavior.transformToNative | def transformToNative(obj):
"""
Turn obj.value into a Name.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = Name(**dict(zip(NAME_ORDER, splitFields(obj.value))))
return obj | python | def transformToNative(obj):
"""
Turn obj.value into a Name.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = Name(**dict(zip(NAME_ORDER, splitFields(obj.value))))
return obj | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"obj",
".",
"value",
"=",
"Name",
"(",
"*",
"*",
"dict",
"(",
"zip",
"(",
"NAME_ORDER",
",",
"splitFields",
"(",
"obj",
".",
"value",
")",
")",
")",
")",
"return",
"obj"
] | Turn obj.value into a Name. | [
"Turn",
"obj",
".",
"value",
"into",
"a",
"Name",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L294-L302 | train | 234,908 |
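`splitFields` and `NAME_ORDER` are defined elsewhere in vcard.py; in vobject, `NAME_ORDER` is `('family', 'given', 'additional', 'prefix', 'suffix')`. A naive illustration of the split on a raw N value (the real `splitFields` also honors backslash escapes, which this sketch ignores):

```python
NAME_ORDER = ("family", "given", "additional", "prefix", "suffix")

raw = "Stevenson;John;Philip,Paul;Dr.;Jr."
fields = [part.split(",") for part in raw.split(";")]
name = dict(zip(NAME_ORDER, fields))
print(name["family"], name["additional"])  # ['Stevenson'] ['Philip', 'Paul']
```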
eventable/vobject | vobject/vcard.py | NameBehavior.transformFromNative | def transformFromNative(obj):
"""
Replace the Name in obj.value with a string.
"""
obj.isNative = False
obj.value = serializeFields(obj.value, NAME_ORDER)
return obj | python | def transformFromNative(obj):
"""
Replace the Name in obj.value with a string.
"""
obj.isNative = False
obj.value = serializeFields(obj.value, NAME_ORDER)
return obj | [
"def",
"transformFromNative",
"(",
"obj",
")",
":",
"obj",
".",
"isNative",
"=",
"False",
"obj",
".",
"value",
"=",
"serializeFields",
"(",
"obj",
".",
"value",
",",
"NAME_ORDER",
")",
"return",
"obj"
] | Replace the Name in obj.value with a string. | [
"Replace",
"the",
"Name",
"in",
"obj",
".",
"value",
"with",
"a",
"string",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L305-L311 | train | 234,909 |
eventable/vobject | vobject/vcard.py | AddressBehavior.transformToNative | def transformToNative(obj):
"""
Turn obj.value into an Address.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = Address(**dict(zip(ADDRESS_ORDER, splitFields(obj.value))))
return obj | python | def transformToNative(obj):
"""
Turn obj.value into an Address.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = Address(**dict(zip(ADDRESS_ORDER, splitFields(obj.value))))
return obj | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"obj",
".",
"value",
"=",
"Address",
"(",
"*",
"*",
"dict",
"(",
"zip",
"(",
"ADDRESS_ORDER",
",",
"splitFields",
"(",
"obj",
".",
"value",
")",
")",
")",
")",
"return",
"obj"
] | Turn obj.value into an Address. | [
"Turn",
"obj",
".",
"value",
"into",
"an",
"Address",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L322-L330 | train | 234,910 |
eventable/vobject | vobject/vcard.py | OrgBehavior.transformToNative | def transformToNative(obj):
"""
Turn obj.value into a list.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = splitFields(obj.value)
return obj | python | def transformToNative(obj):
"""
Turn obj.value into a list.
"""
if obj.isNative:
return obj
obj.isNative = True
obj.value = splitFields(obj.value)
return obj | [
"def",
"transformToNative",
"(",
"obj",
")",
":",
"if",
"obj",
".",
"isNative",
":",
"return",
"obj",
"obj",
".",
"isNative",
"=",
"True",
"obj",
".",
"value",
"=",
"splitFields",
"(",
"obj",
".",
"value",
")",
"return",
"obj"
] | Turn obj.value into a list. | [
"Turn",
"obj",
".",
"value",
"into",
"a",
"list",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/vcard.py#L350-L358 | train | 234,911 |
eventable/vobject | docs/build/lib/vobject/vcard.py | VCardTextBehavior.decode | def decode(cls, line):
"""
Remove backslash escaping from line.value. Decode line, either to remove
backslash escaping, or to decode base64 encoding. The content line should
contain an ENCODING=b for base64 encoding, but Apple Addressbook seems to
export a singleton parameter of 'BASE64', which does not match the 3.0
vCard spec. If we encounter that, then we transform the parameter to
ENCODING=b
"""
if line.encoded:
if 'BASE64' in line.singletonparams:
line.singletonparams.remove('BASE64')
line.encoding_param = cls.base64string
encoding = getattr(line, 'encoding_param', None)
if encoding:
line.value = codecs.decode(line.value.encode("utf-8"), "base64")
else:
line.value = stringToTextValues(line.value)[0]
line.encoded=False | python | def decode(cls, line):
"""
Remove backslash escaping from line.value. Decode line, either to remove
backslash escaping, or to decode base64 encoding. The content line should
contain an ENCODING=b for base64 encoding, but Apple Addressbook seems to
export a singleton parameter of 'BASE64', which does not match the 3.0
vCard spec. If we encounter that, then we transform the parameter to
ENCODING=b
"""
if line.encoded:
if 'BASE64' in line.singletonparams:
line.singletonparams.remove('BASE64')
line.encoding_param = cls.base64string
encoding = getattr(line, 'encoding_param', None)
if encoding:
line.value = codecs.decode(line.value.encode("utf-8"), "base64")
else:
line.value = stringToTextValues(line.value)[0]
line.encoded=False | [
"def",
"decode",
"(",
"cls",
",",
"line",
")",
":",
"if",
"line",
".",
"encoded",
":",
"if",
"'BASE64'",
"in",
"line",
".",
"singletonparams",
":",
"line",
".",
"singletonparams",
".",
"remove",
"(",
"'BASE64'",
")",
"line",
".",
"encoding_param",
"=",
"cls",
".",
"base64string",
"encoding",
"=",
"getattr",
"(",
"line",
",",
"'encoding_param'",
",",
"None",
")",
"if",
"encoding",
":",
"line",
".",
"value",
"=",
"codecs",
".",
"decode",
"(",
"line",
".",
"value",
".",
"encode",
"(",
"\"utf-8\"",
")",
",",
"\"base64\"",
")",
"else",
":",
"line",
".",
"value",
"=",
"stringToTextValues",
"(",
"line",
".",
"value",
")",
"[",
"0",
"]",
"line",
".",
"encoded",
"=",
"False"
] | Remove backslash escaping from line.value. Decode line, either to remove
backslash escaping, or to decode base64 encoding. The content line should
contain an ENCODING=b for base64 encoding, but Apple Addressbook seems to
export a singleton parameter of 'BASE64', which does not match the 3.0
vCard spec. If we encounter that, then we transform the parameter to
ENCODING=b | [
"Remove",
"backslash",
"escaping",
"from",
"line",
".",
"valueDecode",
"line",
"either",
"to",
"remove",
"backslash",
"espacing",
"or",
"to",
"decode",
"base64",
"encoding",
".",
"The",
"content",
"line",
"should",
"contain",
"a",
"ENCODING",
"=",
"b",
"for",
"base64",
"encoding",
"but",
"Apple",
"Addressbook",
"seems",
"to",
"export",
"a",
"singleton",
"parameter",
"of",
"BASE64",
"which",
"does",
"not",
"match",
"the",
"3",
".",
"0",
"vCard",
"spec",
".",
"If",
"we",
"encouter",
"that",
"then",
"we",
"transform",
"the",
"parameter",
"to",
"ENCODING",
"=",
"b"
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/docs/build/lib/vobject/vcard.py#L124-L142 | train | 234,912 |
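The base64 branch above is plain standard-library behavior, independent of vobject, and can be exercised on its own:

```python
import codecs

encoded = "aGVsbG8gd29ybGQ="
print(codecs.decode(encoded.encode("utf-8"), "base64"))  # b'hello world'
```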
eventable/vobject | vobject/behavior.py | Behavior.validate | def validate(cls, obj, raiseException=False, complainUnrecognized=False):
"""Check if the object satisfies this behavior's requirements.
@param obj:
The L{ContentLine<base.ContentLine>} or
L{Component<base.Component>} to be validated.
@param raiseException:
If True, raise a L{base.ValidateError} on validation failure.
Otherwise return a boolean.
@param complainUnrecognized:
If True, fail to validate if an uncrecognized parameter or child is
found. Otherwise log the lack of recognition.
"""
if not cls.allowGroup and obj.group is not None:
err = "{0} has a group, but this object doesn't support groups".format(obj)
raise base.VObjectError(err)
if isinstance(obj, base.ContentLine):
return cls.lineValidate(obj, raiseException, complainUnrecognized)
elif isinstance(obj, base.Component):
count = {}
for child in obj.getChildren():
if not child.validate(raiseException, complainUnrecognized):
return False
name = child.name.upper()
count[name] = count.get(name, 0) + 1
for key, val in cls.knownChildren.items():
if count.get(key, 0) < val[0]:
if raiseException:
m = "{0} components must contain at least {1} {2}"
raise base.ValidateError(m.format(cls.name, val[0], key))
return False
if val[1] and count.get(key, 0) > val[1]:
if raiseException:
m = "{0} components cannot contain more than {1} {2}"
raise base.ValidateError(m.format(cls.name, val[1], key))
return False
return True
else:
err = "{0} is not a Component or Contentline".format(obj)
raise base.VObjectError(err) | python | def validate(cls, obj, raiseException=False, complainUnrecognized=False):
"""Check if the object satisfies this behavior's requirements.
@param obj:
The L{ContentLine<base.ContentLine>} or
L{Component<base.Component>} to be validated.
@param raiseException:
If True, raise a L{base.ValidateError} on validation failure.
Otherwise return a boolean.
@param complainUnrecognized:
If True, fail to validate if an uncrecognized parameter or child is
found. Otherwise log the lack of recognition.
"""
if not cls.allowGroup and obj.group is not None:
err = "{0} has a group, but this object doesn't support groups".format(obj)
raise base.VObjectError(err)
if isinstance(obj, base.ContentLine):
return cls.lineValidate(obj, raiseException, complainUnrecognized)
elif isinstance(obj, base.Component):
count = {}
for child in obj.getChildren():
if not child.validate(raiseException, complainUnrecognized):
return False
name = child.name.upper()
count[name] = count.get(name, 0) + 1
for key, val in cls.knownChildren.items():
if count.get(key, 0) < val[0]:
if raiseException:
m = "{0} components must contain at least {1} {2}"
raise base.ValidateError(m.format(cls.name, val[0], key))
return False
if val[1] and count.get(key, 0) > val[1]:
if raiseException:
m = "{0} components cannot contain more than {1} {2}"
raise base.ValidateError(m.format(cls.name, val[1], key))
return False
return True
else:
err = "{0} is not a Component or Contentline".format(obj)
raise base.VObjectError(err) | [
"def",
"validate",
"(",
"cls",
",",
"obj",
",",
"raiseException",
"=",
"False",
",",
"complainUnrecognized",
"=",
"False",
")",
":",
"if",
"not",
"cls",
".",
"allowGroup",
"and",
"obj",
".",
"group",
"is",
"not",
"None",
":",
"err",
"=",
"\"{0} has a group, but this object doesn't support groups\"",
".",
"format",
"(",
"obj",
")",
"raise",
"base",
".",
"VObjectError",
"(",
"err",
")",
"if",
"isinstance",
"(",
"obj",
",",
"base",
".",
"ContentLine",
")",
":",
"return",
"cls",
".",
"lineValidate",
"(",
"obj",
",",
"raiseException",
",",
"complainUnrecognized",
")",
"elif",
"isinstance",
"(",
"obj",
",",
"base",
".",
"Component",
")",
":",
"count",
"=",
"{",
"}",
"for",
"child",
"in",
"obj",
".",
"getChildren",
"(",
")",
":",
"if",
"not",
"child",
".",
"validate",
"(",
"raiseException",
",",
"complainUnrecognized",
")",
":",
"return",
"False",
"name",
"=",
"child",
".",
"name",
".",
"upper",
"(",
")",
"count",
"[",
"name",
"]",
"=",
"count",
".",
"get",
"(",
"name",
",",
"0",
")",
"+",
"1",
"for",
"key",
",",
"val",
"in",
"cls",
".",
"knownChildren",
".",
"items",
"(",
")",
":",
"if",
"count",
".",
"get",
"(",
"key",
",",
"0",
")",
"<",
"val",
"[",
"0",
"]",
":",
"if",
"raiseException",
":",
"m",
"=",
"\"{0} components must contain at least {1} {2}\"",
"raise",
"base",
".",
"ValidateError",
"(",
"m",
".",
"format",
"(",
"cls",
".",
"name",
",",
"val",
"[",
"0",
"]",
",",
"key",
")",
")",
"return",
"False",
"if",
"val",
"[",
"1",
"]",
"and",
"count",
".",
"get",
"(",
"key",
",",
"0",
")",
">",
"val",
"[",
"1",
"]",
":",
"if",
"raiseException",
":",
"m",
"=",
"\"{0} components cannot contain more than {1} {2}\"",
"raise",
"base",
".",
"ValidateError",
"(",
"m",
".",
"format",
"(",
"cls",
".",
"name",
",",
"val",
"[",
"1",
"]",
",",
"key",
")",
")",
"return",
"False",
"return",
"True",
"else",
":",
"err",
"=",
"\"{0} is not a Component or Contentline\"",
".",
"format",
"(",
"obj",
")",
"raise",
"base",
".",
"VObjectError",
"(",
"err",
")"
] | Check if the object satisfies this behavior's requirements.
@param obj:
The L{ContentLine<base.ContentLine>} or
L{Component<base.Component>} to be validated.
@param raiseException:
If True, raise a L{base.ValidateError} on validation failure.
Otherwise return a boolean.
@param complainUnrecognized:
If True, fail to validate if an uncrecognized parameter or child is
found. Otherwise log the lack of recognition. | [
"Check",
"if",
"the",
"object",
"satisfies",
"this",
"behavior",
"s",
"requirements",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/behavior.py#L63-L103 | train | 234,913 |
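The `knownChildren` check above boils down to counting child names against `(min, max)` bounds. A stripped-down sketch of just that loop; the bounds and child list here are invented for illustration:

```python
known_children = {"DTSTART": (1, 1), "ATTENDEE": (0, None)}
children = ["DTSTART", "ATTENDEE", "ATTENDEE"]

count = {}
for name in children:
    count[name] = count.get(name, 0) + 1

for key, (lo, hi) in known_children.items():
    n = count.get(key, 0)
    if n < lo:
        raise ValueError("must contain at least {0} {1}".format(lo, key))
    if hi is not None and n > hi:
        raise ValueError("cannot contain more than {0} {1}".format(hi, key))
print("valid")
```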
eventable/vobject | vobject/win32tz.py | pickNthWeekday | def pickNthWeekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek > 4 means last instance"""
first = datetime.datetime(year=year, month=month, hour=hour, minute=minute,
day=1)
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
for n in xrange(whichweek - 1, -1, -1):
dt = weekdayone + n * WEEKS
if dt.month == month:
return dt | python | def pickNthWeekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek > 4 means last instance"""
first = datetime.datetime(year=year, month=month, hour=hour, minute=minute,
day=1)
weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
for n in xrange(whichweek - 1, -1, -1):
dt = weekdayone + n * WEEKS
if dt.month == month:
return dt | [
"def",
"pickNthWeekday",
"(",
"year",
",",
"month",
",",
"dayofweek",
",",
"hour",
",",
"minute",
",",
"whichweek",
")",
":",
"first",
"=",
"datetime",
".",
"datetime",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"hour",
"=",
"hour",
",",
"minute",
"=",
"minute",
",",
"day",
"=",
"1",
")",
"weekdayone",
"=",
"first",
".",
"replace",
"(",
"day",
"=",
"(",
"(",
"dayofweek",
"-",
"first",
".",
"isoweekday",
"(",
")",
")",
"%",
"7",
"+",
"1",
")",
")",
"for",
"n",
"in",
"xrange",
"(",
"whichweek",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"dt",
"=",
"weekdayone",
"+",
"n",
"*",
"WEEKS",
"if",
"dt",
".",
"month",
"==",
"month",
":",
"return",
"dt"
] | dayofweek == 0 means Sunday, whichweek > 4 means last instance | [
"dayofweek",
"==",
"0",
"means",
"Sunday",
"whichweek",
">",
"4",
"means",
"last",
"instance"
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/win32tz.py#L77-L85 | train | 234,914 |
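Ported to Python 3 (`range` instead of the Python 2 `xrange` above), the helper can be sanity-checked against a known date: US DST in 2021 began on the second Sunday of March, March 14. Note `dayofweek` follows the Windows convention (0 = Sunday) while `isoweekday()` returns 7 for Sunday; the modulo arithmetic reconciles the two.

```python
import datetime

WEEKS = datetime.timedelta(days=7)

def pick_nth_weekday(year, month, dayofweek, hour, minute, whichweek):
    """dayofweek == 0 means Sunday, whichweek > 4 means last instance."""
    first = datetime.datetime(year=year, month=month, day=1,
                              hour=hour, minute=minute)
    weekdayone = first.replace(day=((dayofweek - first.isoweekday()) % 7 + 1))
    for n in range(whichweek - 1, -1, -1):
        dt = weekdayone + n * WEEKS
        if dt.month == month:
            return dt

# Second Sunday of March 2021 at 02:00 -> 2021-03-14 02:00
print(pick_nth_weekday(2021, 3, 0, 2, 0, 2))
```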
eventable/vobject | vobject/ics_diff.py | deleteExtraneous | def deleteExtraneous(component, ignore_dtstamp=False):
"""
Recursively walk the component's children, deleting extraneous details like
X-VOBJ-ORIGINAL-TZID.
"""
for comp in component.components():
deleteExtraneous(comp, ignore_dtstamp)
for line in component.lines():
if 'X-VOBJ-ORIGINAL-TZID' in line.params:
del line.params['X-VOBJ-ORIGINAL-TZID']
if ignore_dtstamp and hasattr(component, 'dtstamp_list'):
del component.dtstamp_list | python | def deleteExtraneous(component, ignore_dtstamp=False):
"""
Recursively walk the component's children, deleting extraneous details like
X-VOBJ-ORIGINAL-TZID.
"""
for comp in component.components():
deleteExtraneous(comp, ignore_dtstamp)
for line in component.lines():
if 'X-VOBJ-ORIGINAL-TZID' in line.params:
del line.params['X-VOBJ-ORIGINAL-TZID']
if ignore_dtstamp and hasattr(component, 'dtstamp_list'):
del component.dtstamp_list | [
"def",
"deleteExtraneous",
"(",
"component",
",",
"ignore_dtstamp",
"=",
"False",
")",
":",
"for",
"comp",
"in",
"component",
".",
"components",
"(",
")",
":",
"deleteExtraneous",
"(",
"comp",
",",
"ignore_dtstamp",
")",
"for",
"line",
"in",
"component",
".",
"lines",
"(",
")",
":",
"if",
"'X-VOBJ-ORIGINAL-TZID'",
"in",
"line",
".",
"params",
":",
"del",
"line",
".",
"params",
"[",
"'X-VOBJ-ORIGINAL-TZID'",
"]",
"if",
"ignore_dtstamp",
"and",
"hasattr",
"(",
"component",
",",
"'dtstamp_list'",
")",
":",
"del",
"component",
".",
"dtstamp_list"
] | Recursively walk the component's children, deleting extraneous details like
X-VOBJ-ORIGINAL-TZID. | [
"Recursively",
"walk",
"the",
"component",
"s",
"children",
"deleting",
"extraneous",
"details",
"like",
"X",
"-",
"VOBJ",
"-",
"ORIGINAL",
"-",
"TZID",
"."
] | 498555a553155ea9b26aace93332ae79365ecb31 | https://github.com/eventable/vobject/blob/498555a553155ea9b26aace93332ae79365ecb31/vobject/ics_diff.py#L37-L48 | train | 234,915 |
SoftwareDefinedBuildings/XBOS | apps/hole_filling/pelican/backfill.py | fillPelicanHole | def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
"""Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT.
"""
start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
if heat_needs_fan is None:
return None
# Pelican's API only allows a query covering a time range of up to 1 month
# So we may need to run multiple requests for historical data
history_blocks = []
while start < end:
block_start = start
block_end = min(start + timedelta(days=30), end)
blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end)
if blocks is None:
return None
history_blocks.extend(blocks)
start += timedelta(days=30, minutes=1)
output_rows = []
for block in history_blocks:
runStatus = block.find("runStatus").text
if runStatus.startswith("Heat"):
fanState = (heat_needs_fan == "Yes")
else:
fanState = (runStatus != "Off")
api_time = datetime.strptime(block.find("timestamp").text, "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time)
# Need to convert seconds to nanoseconds
timestamp = int(api_time.timestamp() * 10**9)
output_rows.append({
"temperature": float(block.find("temperature").text),
"relative_humidity": float(block.find("humidity").text),
"heating_setpoint": float(block.find("heatSetting").text),
"cooling_setpoint": float(block.find("coolSetting").text),
# Driver explicitly uses "Schedule" field, but we don't have this in history
"override": block.find("setBy").text != "Schedule",
"fan": fanState,
"mode": _mode_name_mappings[block.find("system").text],
"state": _state_mappings.get(runStatus, 0),
"time": timestamp,
})
df = pd.DataFrame(output_rows)
df.drop_duplicates(subset="time", keep="first", inplace=True)
return df | python | def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
"""Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT.
"""
start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
if heat_needs_fan is None:
return None
# Pelican's API only allows a query covering a time range of up to 1 month
# So we may need to run multiple requests for historical data
history_blocks = []
while start < end:
block_start = start
block_end = min(start + timedelta(days=30), end)
blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end)
if blocks is None:
return None
history_blocks.extend(blocks)
start += timedelta(days=30, minutes=1)
output_rows = []
for block in history_blocks:
runStatus = block.find("runStatus").text
if runStatus.startswith("Heat"):
fanState = (heat_needs_fan == "Yes")
else:
fanState = (runStatus != "Off")
api_time = datetime.strptime(block.find("timestamp").text, "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time)
# Need to convert seconds to nanoseconds
timestamp = int(api_time.timestamp() * 10**9)
output_rows.append({
"temperature": float(block.find("temperature").text),
"relative_humidity": float(block.find("humidity").text),
"heating_setpoint": float(block.find("heatSetting").text),
"cooling_setpoint": float(block.find("coolSetting").text),
# Driver explicitly uses "Schedule" field, but we don't have this in history
"override": block.find("setBy").text != "Schedule",
"fan": fanState,
"mode": _mode_name_mappings[block.find("system").text],
"state": _state_mappings.get(runStatus, 0),
"time": timestamp,
})
df = pd.DataFrame(output_rows)
df.drop_duplicates(subset="time", keep="first", inplace=True)
return df | [
"def",
"fillPelicanHole",
"(",
"site",
",",
"username",
",",
"password",
",",
"tstat_name",
",",
"start_time",
",",
"end_time",
")",
":",
"start",
"=",
"datetime",
".",
"strptime",
"(",
"start_time",
",",
"_INPUT_TIME_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
".",
"astimezone",
"(",
"_pelican_time",
")",
"end",
"=",
"datetime",
".",
"strptime",
"(",
"end_time",
",",
"_INPUT_TIME_FORMAT",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"pytz",
".",
"utc",
")",
".",
"astimezone",
"(",
"_pelican_time",
")",
"heat_needs_fan",
"=",
"_lookupHeatNeedsFan",
"(",
"site",
",",
"username",
",",
"password",
",",
"tstat_name",
")",
"if",
"heat_needs_fan",
"is",
"None",
":",
"return",
"None",
"# Pelican's API only allows a query covering a time range of up to 1 month",
"# So we may need run multiple requests for historical data",
"history_blocks",
"=",
"[",
"]",
"while",
"start",
"<",
"end",
":",
"block_start",
"=",
"start",
"block_end",
"=",
"min",
"(",
"start",
"+",
"timedelta",
"(",
"days",
"=",
"30",
")",
",",
"end",
")",
"blocks",
"=",
"_lookupHistoricalData",
"(",
"site",
",",
"username",
",",
"password",
",",
"tstat_name",
",",
"block_start",
",",
"block_end",
")",
"if",
"blocks",
"is",
"None",
":",
"return",
"None",
"history_blocks",
".",
"extend",
"(",
"blocks",
")",
"start",
"+=",
"timedelta",
"(",
"days",
"=",
"30",
",",
"minutes",
"=",
"1",
")",
"output_rows",
"=",
"[",
"]",
"for",
"block",
"in",
"history_blocks",
":",
"runStatus",
"=",
"block",
".",
"find",
"(",
"\"runStatus\"",
")",
".",
"text",
"if",
"runStatus",
".",
"startswith",
"(",
"\"Heat\"",
")",
":",
"fanState",
"=",
"(",
"heatNeedsFan",
"==",
"\"Yes\"",
")",
"else",
":",
"fanState",
"=",
"(",
"runStatus",
"!=",
"\"Off\"",
")",
"api_time",
"=",
"datetime",
".",
"strptime",
"(",
"block",
".",
"find",
"(",
"\"timestamp\"",
")",
".",
"text",
",",
"\"%Y-%m-%dT%H:%M\"",
")",
".",
"replace",
"(",
"tzinfo",
"=",
"_pelican_time",
")",
"# Need to convert seconds to nanoseconds",
"timestamp",
"=",
"int",
"(",
"api_time",
".",
"timestamp",
"(",
")",
"*",
"10",
"**",
"9",
")",
"output_rows",
".",
"append",
"(",
"{",
"\"temperature\"",
":",
"float",
"(",
"block",
".",
"find",
"(",
"\"temperature\"",
")",
".",
"text",
")",
",",
"\"relative_humidity\"",
":",
"float",
"(",
"block",
".",
"find",
"(",
"\"humidity\"",
")",
".",
"text",
")",
",",
"\"heating_setpoint\"",
":",
"float",
"(",
"block",
".",
"find",
"(",
"\"heatSetting\"",
")",
".",
"text",
")",
",",
"\"cooling_setpoint\"",
":",
"float",
"(",
"block",
".",
"find",
"(",
"\"coolSetting\"",
")",
".",
"text",
")",
",",
"# Driver explicitly uses \"Schedule\" field, but we don't have this in history",
"\"override\"",
":",
"block",
".",
"find",
"(",
"\"setBy\"",
")",
".",
"text",
"!=",
"\"Schedule\"",
",",
"\"fan\"",
":",
"fanState",
",",
"\"mode\"",
":",
"_mode_name_mappings",
"[",
"block",
".",
"find",
"(",
"\"system\"",
")",
".",
"text",
"]",
",",
"\"state\"",
":",
"_state_mappings",
".",
"get",
"(",
"runStatus",
",",
"0",
")",
",",
"\"time\"",
":",
"timestamp",
",",
"}",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"output_rows",
")",
"df",
".",
"drop_duplicates",
"(",
"subset",
"=",
"\"time\"",
",",
"keep",
"=",
"\"first\"",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] | Fill a hole in a Pelican thermostat's data stream.
Arguments:
site -- The thermostat's Pelican site name
username -- The Pelican username for the site
password -- The Pelican password for the site
tstat_name -- The name of the thermostat, as identified by Pelican
start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00"
end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00"
Returns:
A Pandas dataframe with historical Pelican data that falls between the
specified start and end times.
Note that this function assumes the Pelican thermostat's local time zone is
US/Pacific. It will properly handle PST vs. PDT. | [
"Fill",
"a",
"hole",
"in",
"a",
"Pelican",
"thermostat",
"s",
"data",
"stream",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/hole_filling/pelican/backfill.py#L73-L137 | train | 234,916 |
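The timestamp handling — parse a naive local time, attach the Pacific zone, convert to epoch nanoseconds — can be reproduced standalone. One caveat worth flagging: the function above attaches the zone with `.replace(tzinfo=...)`, a known pytz pitfall that can pin the zone's default (LMT) offset instead of resolving PST vs. PDT; `localize()` is the form pytz documents for this, as the sketch shows:

```python
from datetime import datetime
import pytz

pacific = pytz.timezone("US/Pacific")
naive = datetime.strptime("2018-07-01T12:30", "%Y-%m-%dT%H:%M")

# localize() resolves PST vs. PDT; replace(tzinfo=pacific) would pin the
# zone's default offset instead, which for US/Pacific is not even PST.
aware = pacific.localize(naive)
print(aware.isoformat())  # 2018-07-01T12:30:00-07:00

# Epoch seconds -> integer nanoseconds, as stored in the dataframe above.
timestamp_ns = int(aware.timestamp() * 10**9)
print(timestamp_ns)
```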
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py | Preprocess_Data.add_degree_days | def add_degree_days(self, col='OAT', hdh_cpoint=65, cdh_cpoint=65):
""" Adds Heating & Cooling Degree Hours.
Parameters
----------
col : str
Column name which contains the outdoor air temperature.
hdh_cpoint : int
Balance-point temperature for heating degree hours. Defaults to 65.
cdh_cpoint : int
Balance-point temperature for cooling degree hours. Defaults to 65.
"""
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
# Calculate hdh
data['hdh'] = data[col]
over_hdh = data.loc[:, col] > hdh_cpoint
data.loc[over_hdh, 'hdh'] = 0
data.loc[~over_hdh, 'hdh'] = hdh_cpoint - data.loc[~over_hdh, col]
# Calculate cdh
data['cdh'] = data[col]
under_cdh = data.loc[:, col] < cdh_cpoint
data.loc[under_cdh, 'cdh'] = 0
data.loc[~under_cdh, 'cdh'] = data.loc[~under_cdh, col] - cdh_cpoint
self.preprocessed_data = data | python | def add_degree_days(self, col='OAT', hdh_cpoint=65, cdh_cpoint=65):
""" Adds Heating & Cooling Degree Hours.
Parameters
----------
col : str
Column name which contains the outdoor air temperature.
hdh_cpoint : int
Balance-point temperature for heating degree hours. Defaults to 65.
cdh_cpoint : int
Balance-point temperature for cooling degree hours. Defaults to 65.
"""
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
# Calculate hdh
data['hdh'] = data[col]
over_hdh = data.loc[:, col] > hdh_cpoint
data.loc[over_hdh, 'hdh'] = 0
data.loc[~over_hdh, 'hdh'] = hdh_cpoint - data.loc[~over_hdh, col]
# Calculate cdh
data['cdh'] = data[col]
under_cdh = data.loc[:, col] < cdh_cpoint
data.loc[under_cdh, 'cdh'] = 0
data.loc[~under_cdh, 'cdh'] = data.loc[~under_cdh, col] - cdh_cpoint
self.preprocessed_data = data | [
"def",
"add_degree_days",
"(",
"self",
",",
"col",
"=",
"'OAT'",
",",
"hdh_cpoint",
"=",
"65",
",",
"cdh_cpoint",
"=",
"65",
")",
":",
"if",
"self",
".",
"preprocessed_data",
".",
"empty",
":",
"data",
"=",
"self",
".",
"original_data",
"else",
":",
"data",
"=",
"self",
".",
"preprocessed_data",
"# Calculate hdh",
"data",
"[",
"'hdh'",
"]",
"=",
"data",
"[",
"col",
"]",
"over_hdh",
"=",
"data",
".",
"loc",
"[",
":",
",",
"col",
"]",
">",
"hdh_cpoint",
"data",
".",
"loc",
"[",
"over_hdh",
",",
"'hdh'",
"]",
"=",
"0",
"data",
".",
"loc",
"[",
"~",
"over_hdh",
",",
"'hdh'",
"]",
"=",
"hdh_cpoint",
"-",
"data",
".",
"loc",
"[",
"~",
"over_hdh",
",",
"col",
"]",
"# Calculate cdh",
"data",
"[",
"'cdh'",
"]",
"=",
"data",
"[",
"col",
"]",
"under_cdh",
"=",
"data",
".",
"loc",
"[",
":",
",",
"col",
"]",
"<",
"cdh_cpoint",
"data",
".",
"loc",
"[",
"under_cdh",
",",
"'cdh'",
"]",
"=",
"0",
"data",
".",
"loc",
"[",
"~",
"under_cdh",
",",
"'cdh'",
"]",
"=",
"data",
".",
"loc",
"[",
"~",
"under_cdh",
",",
"col",
"]",
"-",
"cdh_cpoint",
"self",
".",
"preprocessed_data",
"=",
"data"
] | Adds Heating & Cooling Degree Hours.
Parameters
----------
col : str
Column name which contains the outdoor air temperature.
hdh_cpoint : int
Balance-point temperature for heating degree hours. Defaults to 65.
cdh_cpoint : int
Balance-point temperature for cooling degree hours. Defaults to 65. | [
"Adds",
"Heating",
"&",
"Cooling",
"Degree",
"Hours",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py#L34-L65 | train | 234,917 |
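An equivalent, more compact pandas formulation of the same hdh/cdh masks, on invented sample data, useful for checking the boundary behavior at the balance point:

```python
import pandas as pd

data = pd.DataFrame({"OAT": [50.0, 65.0, 80.0]})
cpoint = 65

over = data["OAT"] > cpoint
data["hdh"] = (cpoint - data["OAT"]).where(~over, 0)  # heating degree hours
data["cdh"] = (data["OAT"] - cpoint).where(over, 0)   # cooling degree hours
print(data)
#     OAT   hdh   cdh
# 0  50.0  15.0   0.0
# 1  65.0   0.0   0.0
# 2  80.0   0.0  15.0
```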
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py | Preprocess_Data.add_col_features | def add_col_features(self, col=None, degree=None):
""" Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
col : list(str)
Column to exponentiate.
degree : list(int)
Exponentiation degree.
"""
if not col and not degree:
return
else:
if isinstance(col, list) and isinstance(degree, list):
if len(col) != len(degree):
print('col len: ', len(col))
print('degree len: ', len(degree))
raise ValueError('col and degree should have equal length.')
else:
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
for i in range(len(col)):
data.loc[:,col[i]+str(degree[i])] = pow(data.loc[:,col[i]],degree[i]) / pow(10,degree[i]-1)
self.preprocessed_data = data
else:
raise TypeError('col and degree should be lists.') | python | def add_col_features(self, col=None, degree=None):
""" Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
col : list(str)
Column to exponentiate.
degree : list(int)
Exponentiation degree.
"""
if not col and not degree:
return
else:
if isinstance(col, list) and isinstance(degree, list):
if len(col) != len(degree):
print('col len: ', len(col))
print('degree len: ', len(degree))
raise ValueError('col and degree should have equal length.')
else:
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
for i in range(len(col)):
data.loc[:,col[i]+str(degree[i])] = pow(data.loc[:,col[i]],degree[i]) / pow(10,degree[i]-1)
self.preprocessed_data = data
else:
raise TypeError('col and degree should be lists.') | [
"def",
"add_col_features",
"(",
"self",
",",
"col",
"=",
"None",
",",
"degree",
"=",
"None",
")",
":",
"if",
"not",
"col",
"and",
"not",
"degree",
":",
"return",
"else",
":",
"if",
"isinstance",
"(",
"col",
",",
"list",
")",
"and",
"isinstance",
"(",
"degree",
",",
"list",
")",
":",
"if",
"len",
"(",
"col",
")",
"!=",
"len",
"(",
"degree",
")",
":",
"print",
"(",
"'col len: '",
",",
"len",
"(",
"col",
")",
")",
"print",
"(",
"'degree len: '",
",",
"len",
"(",
"degree",
")",
")",
"raise",
"ValueError",
"(",
"'col and degree should have equal length.'",
")",
"else",
":",
"if",
"self",
".",
"preprocessed_data",
".",
"empty",
":",
"data",
"=",
"self",
".",
"original_data",
"else",
":",
"data",
"=",
"self",
".",
"preprocessed_data",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"col",
")",
")",
":",
"data",
".",
"loc",
"[",
":",
",",
"col",
"[",
"i",
"]",
"+",
"str",
"(",
"degree",
"[",
"i",
"]",
")",
"]",
"=",
"pow",
"(",
"data",
".",
"loc",
"[",
":",
",",
"col",
"[",
"i",
"]",
"]",
",",
"degree",
"[",
"i",
"]",
")",
"/",
"pow",
"(",
"10",
",",
"degree",
"[",
"i",
"]",
"-",
"1",
")",
"self",
".",
"preprocessed_data",
"=",
"data",
"else",
":",
"raise",
"TypeError",
"(",
"'col and degree should be lists.'",
")"
] | Exponentiate columns of dataframe.
Basically this function squares/cubes a column.
e.g. df[col^2] = pow(df[col], degree) where degree=2.
Parameters
----------
col : list(str)
Column to exponentiate.
degree : list(int)
Exponentiation degree. | [
"Exponentiate",
"columns",
"of",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py#L68-L103 | train | 234,918 |
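A worked instance of the exponentiation step on made-up data. The division by `10**(degree-1)` rescales the new feature, presumably to keep its magnitude comparable to the original column:

```python
import pandas as pd

data = pd.DataFrame({"OAT": [60.0, 70.0]})
col, degree = "OAT", 2

# New column named OAT2, holding OAT**2 / 10.
data.loc[:, col + str(degree)] = pow(data.loc[:, col], degree) / pow(10, degree - 1)
print(data)
#     OAT   OAT2
# 0  60.0  360.0
# 1  70.0  490.0
```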
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py | Preprocess_Data.standardize | def standardize(self):
""" Standardize data. """
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(data), columns=data.columns, index=data.index)
self.preprocessed_data = data | python | def standardize(self):
""" Standardize data. """
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
scaler = preprocessing.StandardScaler()
data = pd.DataFrame(scaler.fit_transform(data), columns=data.columns, index=data.index)
self.preprocessed_data = data | [
"def",
"standardize",
"(",
"self",
")",
":",
"if",
"self",
".",
"preprocessed_data",
".",
"empty",
":",
"data",
"=",
"self",
".",
"original_data",
"else",
":",
"data",
"=",
"self",
".",
"preprocessed_data",
"scaler",
"=",
"preprocessing",
".",
"StandardScaler",
"(",
")",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"scaler",
".",
"fit_transform",
"(",
"data",
")",
",",
"columns",
"=",
"data",
".",
"columns",
",",
"index",
"=",
"data",
".",
"index",
")",
"self",
".",
"preprocessed_data",
"=",
"data"
] | Standardize data. | [
"Standardize",
"data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py#L106-L116 | train | 234,919 |
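Why the function re-wraps the result in a DataFrame: `fit_transform` returns a bare ndarray, dropping the index and column labels. A tiny demonstration with made-up data:

```python
import pandas as pd
from sklearn import preprocessing

data = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 30.0]})
scaler = preprocessing.StandardScaler()

# fit_transform returns a bare ndarray; re-wrapping restores the labels.
scaled = pd.DataFrame(scaler.fit_transform(data),
                      columns=data.columns, index=data.index)
print(scaled.mean().round(6).tolist())  # [0.0, 0.0] -- zero mean per column
```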
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py | Preprocess_Data.normalize | def normalize(self):
""" Normalize data. """
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
data = pd.DataFrame(preprocessing.normalize(data), columns=data.columns, index=data.index)
self.preprocessed_data = data | python | def normalize(self):
""" Normalize data. """
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
data = pd.DataFrame(preprocessing.normalize(data), columns=data.columns, index=data.index)
self.preprocessed_data = data | [
"def",
"normalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"preprocessed_data",
".",
"empty",
":",
"data",
"=",
"self",
".",
"original_data",
"else",
":",
"data",
"=",
"self",
".",
"preprocessed_data",
"data",
"=",
"pd",
".",
"DataFrame",
"(",
"preprocessing",
".",
"normalize",
"(",
"data",
")",
",",
"columns",
"=",
"data",
".",
"columns",
",",
"index",
"=",
"data",
".",
"index",
")",
"self",
".",
"preprocessed_data",
"=",
"data"
] | Normalize data. | [
"Normalize",
"data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Preprocess_Data.py#L119-L128 | train | 234,920 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Preprocess_Data.py | Preprocess_Data.add_time_features | def add_time_features(self, year=False, month=False, week=True, tod=True, dow=True):
""" Add time features to dataframe.
Parameters
----------
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
"""
var_to_expand = []
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
if year:
data["year"] = data.index.year
var_to_expand.append("year")
if month:
data["month"] = data.index.month
var_to_expand.append("month")
if week:
data["week"] = data.index.week
var_to_expand.append("week")
if tod:
data["tod"] = data.index.hour
var_to_expand.append("tod")
if dow:
data["dow"] = data.index.weekday
var_to_expand.append("dow")
# One-hot encode the time features
for var in var_to_expand:
add_var = pd.get_dummies(data[var], prefix=var, drop_first=True)
# Add all the columns to the model data
data = data.join(add_var)
# Drop the original column that was expanded
data.drop(columns=[var], inplace=True)
self.preprocessed_data = data | python | def add_time_features(self, year=False, month=False, week=True, tod=True, dow=True):
""" Add time features to dataframe.
Parameters
----------
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
"""
var_to_expand = []
if self.preprocessed_data.empty:
data = self.original_data
else:
data = self.preprocessed_data
if year:
data["year"] = data.index.year
var_to_expand.append("year")
if month:
data["month"] = data.index.month
var_to_expand.append("month")
if week:
data["week"] = data.index.week
var_to_expand.append("week")
if tod:
data["tod"] = data.index.hour
var_to_expand.append("tod")
if dow:
data["dow"] = data.index.weekday
var_to_expand.append("dow")
# One-hot encode the time features
for var in var_to_expand:
add_var = pd.get_dummies(data[var], prefix=var, drop_first=True)
# Add all the columns to the model data
data = data.join(add_var)
# Drop the original column that was expanded
data.drop(columns=[var], inplace=True)
self.preprocessed_data = data | [
"def",
"add_time_features",
"(",
"self",
",",
"year",
"=",
"False",
",",
"month",
"=",
"False",
",",
"week",
"=",
"True",
",",
"tod",
"=",
"True",
",",
"dow",
"=",
"True",
")",
":",
"var_to_expand",
"=",
"[",
"]",
"if",
"self",
".",
"preprocessed_data",
".",
"empty",
":",
"data",
"=",
"self",
".",
"original_data",
"else",
":",
"data",
"=",
"self",
".",
"preprocessed_data",
"if",
"year",
":",
"data",
"[",
"\"year\"",
"]",
"=",
"data",
".",
"index",
".",
"year",
"var_to_expand",
".",
"append",
"(",
"\"year\"",
")",
"if",
"month",
":",
"data",
"[",
"\"month\"",
"]",
"=",
"data",
".",
"index",
".",
"month",
"var_to_expand",
".",
"append",
"(",
"\"month\"",
")",
"if",
"week",
":",
"data",
"[",
"\"week\"",
"]",
"=",
"data",
".",
"index",
".",
"week",
"var_to_expand",
".",
"append",
"(",
"\"week\"",
")",
"if",
"tod",
":",
"data",
"[",
"\"tod\"",
"]",
"=",
"data",
".",
"index",
".",
"hour",
"var_to_expand",
".",
"append",
"(",
"\"tod\"",
")",
"if",
"dow",
":",
"data",
"[",
"\"dow\"",
"]",
"=",
"data",
".",
"index",
".",
"weekday",
"var_to_expand",
".",
"append",
"(",
"\"dow\"",
")",
"# One-hot encode the time features",
"for",
"var",
"in",
"var_to_expand",
":",
"add_var",
"=",
"pd",
".",
"get_dummies",
"(",
"data",
"[",
"var",
"]",
",",
"prefix",
"=",
"var",
",",
"drop_first",
"=",
"True",
")",
"# Add all the columns to the model data",
"data",
"=",
"data",
".",
"join",
"(",
"add_var",
")",
"# Drop the original column that was expanded",
"data",
".",
"drop",
"(",
"columns",
"=",
"[",
"var",
"]",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"preprocessed_data",
"=",
"data"
] | Add time features to dataframe.
Parameters
----------
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week. | [
"Add",
"time",
"features",
"to",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Preprocess_Data.py#L135-L187 | train | 234,921 |
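The one-hot expansion step in isolation, on a small invented frame. With `drop_first=True`, the first category (here hour 0) becomes the implicit baseline:

```python
import pandas as pd

idx = pd.date_range("2018-01-01", periods=4, freq="h")
data = pd.DataFrame({"load": [1.0, 2.0, 3.0, 4.0]}, index=idx)

data["tod"] = data.index.hour
add_var = pd.get_dummies(data["tod"], prefix="tod", drop_first=True)
data = data.join(add_var)
data.drop(columns=["tod"], inplace=True)
print(data.columns.tolist())  # ['load', 'tod_1', 'tod_2', 'tod_3']
```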
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.split_data | def split_data(self):
""" Split data according to baseline and projection time period values. """
try:
# Extract data ranging in time_period1
time_period1 = (slice(self.baseline_period[0], self.baseline_period[1]))
self.baseline_in = self.original_data.loc[time_period1, self.input_col]
self.baseline_out = self.original_data.loc[time_period1, self.output_col]
if self.exclude_time_period:
for i in range(0, len(self.exclude_time_period), 2):
# Drop data ranging in exclude_time_period1
exclude_time_period1 = (slice(self.exclude_time_period[i], self.exclude_time_period[i+1]))
self.baseline_in.drop(self.baseline_in.loc[exclude_time_period1].index, axis=0, inplace=True)
self.baseline_out.drop(self.baseline_out.loc[exclude_time_period1].index, axis=0, inplace=True)
except Exception as e:
raise e
# CHECK: Can optimize this part
# Error checking to ensure time_period values are valid
if self.projection_period:
for i in range(0, len(self.projection_period), 2):
period = (slice(self.projection_period[i], self.projection_period[i+1]))
try:
self.original_data.loc[period, self.input_col]
self.original_data.loc[period, self.output_col]
except Exception as e:
raise e | python | def split_data(self):
""" Split data according to baseline and projection time period values. """
try:
# Extract data ranging in time_period1
time_period1 = (slice(self.baseline_period[0], self.baseline_period[1]))
self.baseline_in = self.original_data.loc[time_period1, self.input_col]
self.baseline_out = self.original_data.loc[time_period1, self.output_col]
if self.exclude_time_period:
for i in range(0, len(self.exclude_time_period), 2):
# Drop data ranging in exclude_time_period1
exclude_time_period1 = (slice(self.exclude_time_period[i], self.exclude_time_period[i+1]))
self.baseline_in.drop(self.baseline_in.loc[exclude_time_period1].index, axis=0, inplace=True)
self.baseline_out.drop(self.baseline_out.loc[exclude_time_period1].index, axis=0, inplace=True)
except Exception as e:
raise e
# CHECK: Can optimize this part
# Error checking to ensure time_period values are valid
if self.projection_period:
for i in range(0, len(self.projection_period), 2):
period = (slice(self.projection_period[i], self.projection_period[i+1]))
try:
self.original_data.loc[period, self.input_col]
self.original_data.loc[period, self.output_col]
except Exception as e:
raise e | [
"def",
"split_data",
"(",
"self",
")",
":",
"try",
":",
"# Extract data ranging in time_period1",
"time_period1",
"=",
"(",
"slice",
"(",
"self",
".",
"baseline_period",
"[",
"0",
"]",
",",
"self",
".",
"baseline_period",
"[",
"1",
"]",
")",
")",
"self",
".",
"baseline_in",
"=",
"self",
".",
"original_data",
".",
"loc",
"[",
"time_period1",
",",
"self",
".",
"input_col",
"]",
"self",
".",
"baseline_out",
"=",
"self",
".",
"original_data",
".",
"loc",
"[",
"time_period1",
",",
"self",
".",
"output_col",
"]",
"if",
"self",
".",
"exclude_time_period",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"exclude_time_period",
")",
",",
"2",
")",
":",
"# Drop data ranging in exclude_time_period1",
"exclude_time_period1",
"=",
"(",
"slice",
"(",
"self",
".",
"exclude_time_period",
"[",
"i",
"]",
",",
"self",
".",
"exclude_time_period",
"[",
"i",
"+",
"1",
"]",
")",
")",
"self",
".",
"baseline_in",
".",
"drop",
"(",
"self",
".",
"baseline_in",
".",
"loc",
"[",
"exclude_time_period1",
"]",
".",
"index",
",",
"axis",
"=",
"0",
",",
"inplace",
"=",
"True",
")",
"self",
".",
"baseline_out",
".",
"drop",
"(",
"self",
".",
"baseline_out",
".",
"loc",
"[",
"exclude_time_period1",
"]",
".",
"index",
",",
"axis",
"=",
"0",
",",
"inplace",
"=",
"True",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"# CHECK: Can optimize this part",
"# Error checking to ensure time_period values are valid",
"if",
"self",
".",
"projection_period",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"self",
".",
"projection_period",
")",
",",
"2",
")",
":",
"period",
"=",
"(",
"slice",
"(",
"self",
".",
"projection_period",
"[",
"i",
"]",
",",
"self",
".",
"projection_period",
"[",
"i",
"+",
"1",
"]",
")",
")",
"try",
":",
"self",
".",
"original_data",
".",
"loc",
"[",
"period",
",",
"self",
".",
"input_col",
"]",
"self",
".",
"original_data",
".",
"loc",
"[",
"period",
",",
"self",
".",
"output_col",
"]",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e"
] | Split data according to baseline and projection time period values. | [
"Split",
"data",
"according",
"to",
"baseline",
"and",
"projection",
"time",
"period",
"values",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L125-L152 | train | 234,922 |
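A hypothetical setup sketch for split_data(); Model_Data's constructor is not part of these records, so __new__ is used to bypass it, and the column names below are assumptions taken from nothing more than the function body:

import numpy as np
import pandas as pd

idx = pd.date_range('2018-01-01', '2018-12-31', freq='D')
df = pd.DataFrame({'OAT': np.random.rand(len(idx)),
                   'energy': np.random.rand(len(idx))}, index=idx)

model_data = Model_Data.__new__(Model_Data)   # skip the unknown __init__
model_data.original_data = df                 # DataFrame with a DatetimeIndex
model_data.input_col = ['OAT']                # predictor column(s), hypothetical name
model_data.output_col = 'energy'              # target column, hypothetical name
model_data.baseline_period = ['2018-01-01', '2018-06-30']
model_data.exclude_time_period = []           # or e.g. ['2018-03-01', '2018-03-07']
model_data.projection_period = ['2018-07-01', '2018-12-31']
model_data.split_data()                       # fills baseline_in / baseline_out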
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.linear_regression | def linear_regression(self):
""" Linear Regression.
This function runs linear regression and stores the following:
1. Model
2. Model name
3. Mean score of cross validation
4. Metrics
"""
model = LinearRegression()
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = sum(scores) / len(scores)
self.models.append(model)
self.model_names.append('Linear Regression')
self.max_scores.append(mean_score)
self.metrics['Linear Regression'] = {}
self.metrics['Linear Regression']['R2'] = mean_score
self.metrics['Linear Regression']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | python | def linear_regression(self):
""" Linear Regression.
This function runs linear regression and stores the following:
1. Model
2. Model name
3. Mean score of cross validation
4. Metrics
"""
model = LinearRegression()
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = sum(scores) / len(scores)
self.models.append(model)
self.model_names.append('Linear Regression')
self.max_scores.append(mean_score)
self.metrics['Linear Regression'] = {}
self.metrics['Linear Regression']['R2'] = mean_score
self.metrics['Linear Regression']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | [
"def",
"linear_regression",
"(",
"self",
")",
":",
"model",
"=",
"LinearRegression",
"(",
")",
"scores",
"=",
"[",
"]",
"kfold",
"=",
"KFold",
"(",
"n_splits",
"=",
"self",
".",
"cv",
",",
"shuffle",
"=",
"True",
",",
"random_state",
"=",
"42",
")",
"for",
"i",
",",
"(",
"train",
",",
"test",
")",
"in",
"enumerate",
"(",
"kfold",
".",
"split",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
")",
")",
":",
"model",
".",
"fit",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"train",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"train",
"]",
")",
"scores",
".",
"append",
"(",
"model",
".",
"score",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"test",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"test",
"]",
")",
")",
"mean_score",
"=",
"sum",
"(",
"scores",
")",
"/",
"len",
"(",
"scores",
")",
"self",
".",
"models",
".",
"append",
"(",
"model",
")",
"self",
".",
"model_names",
".",
"append",
"(",
"'Linear Regression'",
")",
"self",
".",
"max_scores",
".",
"append",
"(",
"mean_score",
")",
"self",
".",
"metrics",
"[",
"'Linear Regression'",
"]",
"=",
"{",
"}",
"self",
".",
"metrics",
"[",
"'Linear Regression'",
"]",
"[",
"'R2'",
"]",
"=",
"mean_score",
"self",
".",
"metrics",
"[",
"'Linear Regression'",
"]",
"[",
"'Adj R2'",
"]",
"=",
"self",
".",
"adj_r2",
"(",
"mean_score",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"1",
"]",
")"
] | Linear Regression.
This function runs linear regression and stores the following:
1. Model
2. Model name
3. Mean score of cross validation
4. Metrics | [
"Linear",
"Regression",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L176-L203 | train | 234,923 |
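The same k-fold scoring pattern as a standalone sketch on synthetic data; three folds are used here in place of self.cv, which is set outside these records:

import pandas as pd
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold

X, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=42)
X, y = pd.DataFrame(X), pd.Series(y)

model = LinearRegression()
scores = []
kfold = KFold(n_splits=3, shuffle=True, random_state=42)
for train, test in kfold.split(X, y):
    model.fit(X.iloc[train], y.iloc[train])                 # fit on the fold's train rows
    scores.append(model.score(X.iloc[test], y.iloc[test]))  # R2 on the fold's test rows
mean_score = sum(scores) / len(scores)
print(mean_score)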
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.lasso_regression | def lasso_regression(self):
""" Lasso Regression.
This function runs lasso regression and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics
"""
score_list = []
max_score = float('-inf')
best_alpha = None
for alpha in self.alphas:
# model = Lasso(normalize=True, alpha=alpha, max_iter=5000)
model = Lasso(alpha=alpha, max_iter=5000)
model.fit(self.baseline_in, self.baseline_out.values.ravel())
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
score_list.append(mean_score)
if mean_score > max_score:
max_score = mean_score
best_alpha = alpha
# self.models.append(Lasso(normalize=True, alpha=best_alpha, max_iter=5000))
self.models.append(Lasso(alpha=best_alpha, max_iter=5000))
self.model_names.append('Lasso Regression')
self.max_scores.append(max_score)
self.metrics['Lasso Regression'] = {}
self.metrics['Lasso Regression']['R2'] = max_score
self.metrics['Lasso Regression']['Adj R2'] = self.adj_r2(max_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | python | def lasso_regression(self):
""" Lasso Regression.
This function runs lasso regression and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics
"""
score_list = []
max_score = float('-inf')
best_alpha = None
for alpha in self.alphas:
# model = Lasso(normalize=True, alpha=alpha, max_iter=5000)
model = Lasso(alpha=alpha, max_iter=5000)
model.fit(self.baseline_in, self.baseline_out.values.ravel())
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
score_list.append(mean_score)
if mean_score > max_score:
max_score = mean_score
best_alpha = alpha
# self.models.append(Lasso(normalize=True, alpha=best_alpha, max_iter=5000))
self.models.append(Lasso(alpha=best_alpha, max_iter=5000))
self.model_names.append('Lasso Regression')
self.max_scores.append(max_score)
self.metrics['Lasso Regression'] = {}
self.metrics['Lasso Regression']['R2'] = max_score
self.metrics['Lasso Regression']['Adj R2'] = self.adj_r2(max_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | [
"def",
"lasso_regression",
"(",
"self",
")",
":",
"score_list",
"=",
"[",
"]",
"max_score",
"=",
"float",
"(",
"'-inf'",
")",
"best_alpha",
"=",
"None",
"for",
"alpha",
"in",
"self",
".",
"alphas",
":",
"# model = Lasso(normalize=True, alpha=alpha, max_iter=5000)",
"model",
"=",
"Lasso",
"(",
"alpha",
"=",
"alpha",
",",
"max_iter",
"=",
"5000",
")",
"model",
".",
"fit",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
".",
"values",
".",
"ravel",
"(",
")",
")",
"scores",
"=",
"[",
"]",
"kfold",
"=",
"KFold",
"(",
"n_splits",
"=",
"self",
".",
"cv",
",",
"shuffle",
"=",
"True",
",",
"random_state",
"=",
"42",
")",
"for",
"i",
",",
"(",
"train",
",",
"test",
")",
"in",
"enumerate",
"(",
"kfold",
".",
"split",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
")",
")",
":",
"model",
".",
"fit",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"train",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"train",
"]",
")",
"scores",
".",
"append",
"(",
"model",
".",
"score",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"test",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"test",
"]",
")",
")",
"mean_score",
"=",
"np",
".",
"mean",
"(",
"scores",
")",
"score_list",
".",
"append",
"(",
"mean_score",
")",
"if",
"mean_score",
">",
"max_score",
":",
"max_score",
"=",
"mean_score",
"best_alpha",
"=",
"alpha",
"# self.models.append(Lasso(normalize=True, alpha=best_alpha, max_iter=5000))",
"self",
".",
"models",
".",
"append",
"(",
"Lasso",
"(",
"alpha",
"=",
"best_alpha",
",",
"max_iter",
"=",
"5000",
")",
")",
"self",
".",
"model_names",
".",
"append",
"(",
"'Lasso Regression'",
")",
"self",
".",
"max_scores",
".",
"append",
"(",
"max_score",
")",
"self",
".",
"metrics",
"[",
"'Lasso Regression'",
"]",
"=",
"{",
"}",
"self",
".",
"metrics",
"[",
"'Lasso Regression'",
"]",
"[",
"'R2'",
"]",
"=",
"max_score",
"self",
".",
"metrics",
"[",
"'Lasso Regression'",
"]",
"[",
"'Adj R2'",
"]",
"=",
"self",
".",
"adj_r2",
"(",
"max_score",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"1",
"]",
")"
] | Lasso Regression.
This function runs lasso regression and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics | [
"Lasso",
"Regression",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L206-L246 | train | 234,924 |
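The alpha search above reduces to the pattern below; self.alphas is set outside these records, so a log-spaced grid is assumed:

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import Lasso
from sklearn.model_selection import cross_val_score

X, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=42)

best_alpha, max_score = None, float('-inf')
for alpha in np.logspace(-3, 1, 10):        # assumed grid standing in for self.alphas
    model = Lasso(alpha=alpha, max_iter=5000)
    mean_score = cross_val_score(model, X, y, cv=3).mean()  # mean R2 across folds
    if mean_score > max_score:
        max_score, best_alpha = mean_score, alpha
print(best_alpha, max_score)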
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.random_forest | def random_forest(self):
""" Random Forest.
This function runs random forest and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics
"""
model = RandomForestRegressor(random_state=42)
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
self.models.append(model)
self.model_names.append('Random Forest Regressor')
self.max_scores.append(mean_score)
self.metrics['Random Forest Regressor'] = {}
self.metrics['Random Forest Regressor']['R2'] = mean_score
self.metrics['Random Forest Regressor']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | python | def random_forest(self):
""" Random Forest.
This function runs random forest and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics
"""
model = RandomForestRegressor(random_state=42)
scores = []
kfold = KFold(n_splits=self.cv, shuffle=True, random_state=42)
for i, (train, test) in enumerate(kfold.split(self.baseline_in, self.baseline_out)):
model.fit(self.baseline_in.iloc[train], self.baseline_out.iloc[train])
scores.append(model.score(self.baseline_in.iloc[test], self.baseline_out.iloc[test]))
mean_score = np.mean(scores)
self.models.append(model)
self.model_names.append('Random Forest Regressor')
self.max_scores.append(mean_score)
self.metrics['Random Forest Regressor'] = {}
self.metrics['Random Forest Regressor']['R2'] = mean_score
self.metrics['Random Forest Regressor']['Adj R2'] = self.adj_r2(mean_score, self.baseline_in.shape[0], self.baseline_in.shape[1]) | [
"def",
"random_forest",
"(",
"self",
")",
":",
"model",
"=",
"RandomForestRegressor",
"(",
"random_state",
"=",
"42",
")",
"scores",
"=",
"[",
"]",
"kfold",
"=",
"KFold",
"(",
"n_splits",
"=",
"self",
".",
"cv",
",",
"shuffle",
"=",
"True",
",",
"random_state",
"=",
"42",
")",
"for",
"i",
",",
"(",
"train",
",",
"test",
")",
"in",
"enumerate",
"(",
"kfold",
".",
"split",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
")",
")",
":",
"model",
".",
"fit",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"train",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"train",
"]",
")",
"scores",
".",
"append",
"(",
"model",
".",
"score",
"(",
"self",
".",
"baseline_in",
".",
"iloc",
"[",
"test",
"]",
",",
"self",
".",
"baseline_out",
".",
"iloc",
"[",
"test",
"]",
")",
")",
"mean_score",
"=",
"np",
".",
"mean",
"(",
"scores",
")",
"self",
".",
"models",
".",
"append",
"(",
"model",
")",
"self",
".",
"model_names",
".",
"append",
"(",
"'Random Forest Regressor'",
")",
"self",
".",
"max_scores",
".",
"append",
"(",
"mean_score",
")",
"self",
".",
"metrics",
"[",
"'Random Forest Regressor'",
"]",
"=",
"{",
"}",
"self",
".",
"metrics",
"[",
"'Random Forest Regressor'",
"]",
"[",
"'R2'",
"]",
"=",
"mean_score",
"self",
".",
"metrics",
"[",
"'Random Forest Regressor'",
"]",
"[",
"'Adj R2'",
"]",
"=",
"self",
".",
"adj_r2",
"(",
"mean_score",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"1",
"]",
")"
] | Random Forest.
This function runs random forest and stores the following:
1. Model
2. Model name
3. Max score
4. Metrics | [
"Random",
"Forest",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L338-L364 | train | 234,925 |
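Cross-validation here follows the same pattern as the linear model; what a fitted forest adds is feature importances, sketched standalone below:

import pandas as pd
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=200, n_features=3, noise=5.0, random_state=42)
X = pd.DataFrame(X, columns=['f0', 'f1', 'f2'])

model = RandomForestRegressor(random_state=42).fit(X, y)
print(pd.Series(model.feature_importances_, index=X.columns).sort_values())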
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.run_models | def run_models(self):
""" Run all models.
Returns
-------
model
Best model
dict
Metrics of the models
"""
self.linear_regression()
self.lasso_regression()
self.ridge_regression()
self.elastic_net_regression()
self.random_forest()
self.ann()
# Index of the model with max score
best_model_index = self.max_scores.index(max(self.max_scores))
# Store name of the optimal model
self.best_model_name = self.model_names[best_model_index]
# Store optimal model
self.best_model = self.models[best_model_index]
return self.metrics | python | def run_models(self):
""" Run all models.
Returns
-------
model
Best model
dict
Metrics of the models
"""
self.linear_regression()
self.lasso_regression()
self.ridge_regression()
self.elastic_net_regression()
self.random_forest()
self.ann()
# Index of the model with max score
best_model_index = self.max_scores.index(max(self.max_scores))
# Store name of the optimal model
self.best_model_name = self.model_names[best_model_index]
# Store optimal model
self.best_model = self.models[best_model_index]
return self.metrics | [
"def",
"run_models",
"(",
"self",
")",
":",
"self",
".",
"linear_regression",
"(",
")",
"self",
".",
"lasso_regression",
"(",
")",
"self",
".",
"ridge_regression",
"(",
")",
"self",
".",
"elastic_net_regression",
"(",
")",
"self",
".",
"random_forest",
"(",
")",
"self",
".",
"ann",
"(",
")",
"# Index of the model with max score",
"best_model_index",
"=",
"self",
".",
"max_scores",
".",
"index",
"(",
"max",
"(",
"self",
".",
"max_scores",
")",
")",
"# Store name of the optimal model",
"self",
".",
"best_model_name",
"=",
"self",
".",
"model_names",
"[",
"best_model_index",
"]",
"# Store optimal model",
"self",
".",
"best_model",
"=",
"self",
".",
"models",
"[",
"best_model_index",
"]",
"return",
"self",
".",
"metrics"
] | Run all models.
Returns
-------
model
Best model
dict
Metrics of the models | [
"Run",
"all",
"models",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L396-L424 | train | 234,926 |
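Usage sketch, assuming a Model_Data instance prepared as in the split_data sketch above; ridge_regression, elastic_net_regression, and ann are defined elsewhere in the class, so the call only works against the full module:

metrics = model_data.run_models()            # cross-validates every candidate model
print(metrics['Linear Regression']['Adj R2'])
print(model_data.best_model_name)            # name of the highest-scoring model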
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.custom_model | def custom_model(self, func):
""" Run custom model provided by user.
To Do,
1. Define custom function's parameters, its data types, and return types
Parameters
----------
func : function
Custom function
Returns
-------
dict
Custom function's metrics
"""
y_pred = func(self.baseline_in, self.baseline_out)
self.custom_metrics = {}
self.custom_metrics['r2'] = r2_score(self.baseline_out, y_pred)
self.custom_metrics['mse'] = mean_squared_error(self.baseline_out, y_pred)
self.custom_metrics['rmse'] = math.sqrt(self.custom_metrics['mse'])
self.custom_metrics['adj_r2'] = self.adj_r2(self.custom_metrics['r2'], self.baseline_in.shape[0], self.baseline_in.shape[1])
return self.custom_metrics | python | def custom_model(self, func):
""" Run custom model provided by user.
To Do,
1. Define custom function's parameters, its data types, and return types
Parameters
----------
func : function
Custom function
Returns
-------
dict
Custom function's metrics
"""
y_pred = func(self.baseline_in, self.baseline_out)
self.custom_metrics = {}
self.custom_metrics['r2'] = r2_score(self.baseline_out, y_pred)
self.custom_metrics['mse'] = mean_squared_error(self.baseline_out, y_pred)
self.custom_metrics['rmse'] = math.sqrt(self.custom_metrics['mse'])
self.custom_metrics['adj_r2'] = self.adj_r2(self.custom_metrics['r2'], self.baseline_in.shape[0], self.baseline_in.shape[1])
return self.custom_metrics | [
"def",
"custom_model",
"(",
"self",
",",
"func",
")",
":",
"y_pred",
"=",
"func",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
")",
"self",
".",
"custom_metrics",
"=",
"{",
"}",
"self",
".",
"custom_metrics",
"[",
"'r2'",
"]",
"=",
"r2_score",
"(",
"self",
".",
"baseline_out",
",",
"y_pred",
")",
"self",
".",
"custom_metrics",
"[",
"'mse'",
"]",
"=",
"mean_squared_error",
"(",
"self",
".",
"baseline_out",
",",
"y_pred",
")",
"self",
".",
"custom_metrics",
"[",
"'rmse'",
"]",
"=",
"math",
".",
"sqrt",
"(",
"self",
".",
"custom_metrics",
"[",
"'mse'",
"]",
")",
"self",
".",
"custom_metrics",
"[",
"'adj_r2'",
"]",
"=",
"self",
".",
"adj_r2",
"(",
"self",
".",
"custom_metrics",
"[",
"'r2'",
"]",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"0",
"]",
",",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"1",
"]",
")",
"return",
"self",
".",
"custom_metrics"
] | Run custom model provided by user.
To Do,
1. Define custom function's parameters, its data types, and return types
Parameters
----------
func : function
Custom function
Returns
-------
dict
Custom function's metrics | [
"Run",
"custom",
"model",
"provided",
"by",
"user",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L427-L453 | train | 234,927 |
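custom_model() only needs a callable taking (baseline_in, baseline_out) and returning in-sample predictions, so a sketch can be as small as this (model_data as in the earlier split_data sketch):

from sklearn.linear_model import LinearRegression

def my_model(X, y):
    # hypothetical custom model: OLS fitted values as the predictions
    return LinearRegression().fit(X, y).predict(X)

custom_metrics = model_data.custom_model(my_model)
print(custom_metrics['rmse'])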
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Model_Data.py | Model_Data.best_model_fit | def best_model_fit(self):
""" Fit data to optimal model and return its metrics.
Returns
-------
dict
Best model's metrics
"""
self.best_model.fit(self.baseline_in, self.baseline_out)
self.y_true = self.baseline_out # Pandas Series
self.y_pred = self.best_model.predict(self.baseline_in) # numpy.ndarray
# Set all negative values to zero since energy > 0
self.y_pred[self.y_pred < 0] = 0
# n and k values for adj r2 score
self.n_test = self.baseline_in.shape[0] # Number of points in data sample
self.k_test = self.baseline_in.shape[1] # Number of variables in model, excluding the constant
# Store best model's metrics
self.best_metrics['name'] = self.best_model_name
self.best_metrics['r2'] = r2_score(self.y_true, self.y_pred)
self.best_metrics['mse'] = mean_squared_error(self.y_true, self.y_pred)
self.best_metrics['rmse'] = math.sqrt(self.best_metrics['mse'])
self.best_metrics['adj_r2'] = self.adj_r2(self.best_metrics['r2'], self.n_test, self.k_test)
# Normalized Mean Bias Error
numerator = sum(self.y_true - self.y_pred)
denominator = (self.n_test - self.k_test) * (sum(self.y_true) / len(self.y_true))
self.best_metrics['nmbe'] = numerator / denominator
# MAPE can't have 0 values in baseline_out -> divide by zero error
self.baseline_out_copy = self.baseline_out[self.baseline_out != 0]
self.baseline_in_copy = self.baseline_in[self.baseline_in.index.isin(self.baseline_out_copy.index)]
self.y_true_copy = self.baseline_out_copy # Pandas Series
self.y_pred_copy = self.best_model.predict(self.baseline_in_copy) # numpy.ndarray
self.best_metrics['mape'] = np.mean(np.abs((self.y_true_copy - self.y_pred_copy) / self.y_true_copy)) * 100
return self.best_metrics | python | def best_model_fit(self):
""" Fit data to optimal model and return its metrics.
Returns
-------
dict
Best model's metrics
"""
self.best_model.fit(self.baseline_in, self.baseline_out)
self.y_true = self.baseline_out # Pandas Series
self.y_pred = self.best_model.predict(self.baseline_in) # numpy.ndarray
# Set all negative values to zero since energy > 0
self.y_pred[self.y_pred < 0] = 0
# n and k values for adj r2 score
self.n_test = self.baseline_in.shape[0] # Number of points in data sample
self.k_test = self.baseline_in.shape[1] # Number of variables in model, excluding the constant
# Store best model's metrics
self.best_metrics['name'] = self.best_model_name
self.best_metrics['r2'] = r2_score(self.y_true, self.y_pred)
self.best_metrics['mse'] = mean_squared_error(self.y_true, self.y_pred)
self.best_metrics['rmse'] = math.sqrt(self.best_metrics['mse'])
self.best_metrics['adj_r2'] = self.adj_r2(self.best_metrics['r2'], self.n_test, self.k_test)
# Normalized Mean Bias Error
numerator = sum(self.y_true - self.y_pred)
denominator = (self.n_test - self.k_test) * (sum(self.y_true) / len(self.y_true))
self.best_metrics['nmbe'] = numerator / denominator
# MAPE can't have 0 values in baseline_out -> divide by zero error
self.baseline_out_copy = self.baseline_out[self.baseline_out != 0]
self.baseline_in_copy = self.baseline_in[self.baseline_in.index.isin(self.baseline_out_copy.index)]
self.y_true_copy = self.baseline_out_copy # Pandas Series
self.y_pred_copy = self.best_model.predict(self.baseline_in_copy) # numpy.ndarray
self.best_metrics['mape'] = np.mean(np.abs((self.y_true_copy - self.y_pred_copy) / self.y_true_copy)) * 100
return self.best_metrics | [
"def",
"best_model_fit",
"(",
"self",
")",
":",
"self",
".",
"best_model",
".",
"fit",
"(",
"self",
".",
"baseline_in",
",",
"self",
".",
"baseline_out",
")",
"self",
".",
"y_true",
"=",
"self",
".",
"baseline_out",
"# Pandas Series",
"self",
".",
"y_pred",
"=",
"self",
".",
"best_model",
".",
"predict",
"(",
"self",
".",
"baseline_in",
")",
"# numpy.ndarray",
"# Set all negative values to zero since energy > 0",
"self",
".",
"y_pred",
"[",
"self",
".",
"y_pred",
"<",
"0",
"]",
"=",
"0",
"# n and k values for adj r2 score",
"self",
".",
"n_test",
"=",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"0",
"]",
"# Number of points in data sample",
"self",
".",
"k_test",
"=",
"self",
".",
"baseline_in",
".",
"shape",
"[",
"1",
"]",
"# Number of variables in model, excluding the constant",
"# Store best model's metrics",
"self",
".",
"best_metrics",
"[",
"'name'",
"]",
"=",
"self",
".",
"best_model_name",
"self",
".",
"best_metrics",
"[",
"'r2'",
"]",
"=",
"r2_score",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_pred",
")",
"self",
".",
"best_metrics",
"[",
"'mse'",
"]",
"=",
"mean_squared_error",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_pred",
")",
"self",
".",
"best_metrics",
"[",
"'rmse'",
"]",
"=",
"math",
".",
"sqrt",
"(",
"self",
".",
"best_metrics",
"[",
"'mse'",
"]",
")",
"self",
".",
"best_metrics",
"[",
"'adj_r2'",
"]",
"=",
"self",
".",
"adj_r2",
"(",
"self",
".",
"best_metrics",
"[",
"'r2'",
"]",
",",
"self",
".",
"n_test",
",",
"self",
".",
"k_test",
")",
"# Normalized Mean Bias Error",
"numerator",
"=",
"sum",
"(",
"self",
".",
"y_true",
"-",
"self",
".",
"y_pred",
")",
"denominator",
"=",
"(",
"self",
".",
"n_test",
"-",
"self",
".",
"k_test",
")",
"*",
"(",
"sum",
"(",
"self",
".",
"y_true",
")",
"/",
"len",
"(",
"self",
".",
"y_true",
")",
")",
"self",
".",
"best_metrics",
"[",
"'nmbe'",
"]",
"=",
"numerator",
"/",
"denominator",
"# MAPE can't have 0 values in baseline_out -> divide by zero error",
"self",
".",
"baseline_out_copy",
"=",
"self",
".",
"baseline_out",
"[",
"self",
".",
"baseline_out",
"!=",
"0",
"]",
"self",
".",
"baseline_in_copy",
"=",
"self",
".",
"baseline_in",
"[",
"self",
".",
"baseline_in",
".",
"index",
".",
"isin",
"(",
"self",
".",
"baseline_out_copy",
".",
"index",
")",
"]",
"self",
".",
"y_true_copy",
"=",
"self",
".",
"baseline_out_copy",
"# Pandas Series",
"self",
".",
"y_pred_copy",
"=",
"self",
".",
"best_model",
".",
"predict",
"(",
"self",
".",
"baseline_in_copy",
")",
"# numpy.ndarray",
"self",
".",
"best_metrics",
"[",
"'mape'",
"]",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"abs",
"(",
"(",
"self",
".",
"y_true_copy",
"-",
"self",
".",
"y_pred_copy",
")",
"/",
"self",
".",
"y_true_copy",
")",
")",
"*",
"100",
"return",
"self",
".",
"best_metrics"
] | Fit data to optimal model and return its metrics.
Returns
-------
dict
Best model's metrics | [
"Fit",
"data",
"to",
"optimal",
"model",
"and",
"return",
"its",
"metrics",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Model_Data.py#L456-L497 | train | 234,928 |
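The two non-sklearn metrics stored above, written out standalone; n is the sample count and k the predictor count, as in the record:

import numpy as np

def nmbe(y_true, y_pred, n, k):
    # Normalized Mean Bias Error, matching the record's numerator/denominator
    return np.sum(y_true - y_pred) / ((n - k) * np.mean(y_true))

def mape(y_true, y_pred):
    mask = y_true != 0  # drop zeros first, as the record does, to avoid dividing by zero
    return np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100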
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Plot_Data.py | Plot_Data.correlation_plot | def correlation_plot(self, data):
""" Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap.
"""
# CHECK: Add saved filename in result.json
fig = plt.figure(Plot_Data.count)
corr = data.corr()
ax = sns.heatmap(corr)
Plot_Data.count += 1
return fig | python | def correlation_plot(self, data):
""" Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap.
"""
# CHECK: Add saved filename in result.json
fig = plt.figure(Plot_Data.count)
corr = data.corr()
ax = sns.heatmap(corr)
Plot_Data.count += 1
return fig | [
"def",
"correlation_plot",
"(",
"self",
",",
"data",
")",
":",
"# CHECK: Add saved filename in result.json",
"fig",
"=",
"plt",
".",
"figure",
"(",
"Plot_Data",
".",
"count",
")",
"corr",
"=",
"data",
".",
"corr",
"(",
")",
"ax",
"=",
"sns",
".",
"heatmap",
"(",
"corr",
")",
"Plot_Data",
".",
"count",
"+=",
"1",
"return",
"fig"
] | Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap. | [
"Create",
"heatmap",
"of",
"Pearson",
"s",
"correlation",
"coefficient",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Plot_Data.py#L42-L63 | train | 234,929 |
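The method body reduces to two library calls; a standalone sketch without the Plot_Data figure counter:

import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

df = pd.DataFrame(np.random.rand(100, 3), columns=['a', 'b', 'c'])
fig = plt.figure()
sns.heatmap(df.corr())  # Pearson correlation by default
plt.show()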
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Plot_Data.py | Plot_Data.baseline_projection_plot | def baseline_projection_plot(self, y_true, y_pred,
baseline_period, projection_period,
model_name, adj_r2,
data, input_col, output_col, model,
site):
""" Create baseline and projection plots.
Parameters
----------
y_true : pd.Series()
Actual y values.
y_pred : np.ndarray
Predicted y values.
baseline_period : list(str)
Baseline period.
projection_period : list(str)
Projection periods.
model_name : str
Optimal model's name.
adj_r2 : float
Adjusted R2 score of optimal model.
data : pd.Dataframe()
Data containing real values.
input_col : list(str)
Predictor column(s).
output_col : str
Target column.
model : func
Optimal model.
Returns
-------
(matplotlib.figure, pd.Series, np.ndarray)
Baseline plot figure, plus the projection-period actuals and predictions (None, None when no projection period is given).
"""
# Baseline and projection plots
fig = plt.figure(Plot_Data.count)
# Number of plots to display
if projection_period:
nrows = len(baseline_period) // 2 + len(projection_period) // 2
else:
nrows = len(baseline_period) // 2
# Plot 1 - Baseline
base_df = pd.DataFrame()
base_df['y_true'] = y_true
base_df['y_pred'] = y_pred
ax1 = fig.add_subplot(nrows, 1, 1)
base_df.plot(ax=ax1, figsize=self.figsize,
title='Baseline Period ({}-{}). \nBest Model: {}. \nBaseline Adj R2: {}. \nSite: {}.'.format(baseline_period[0], baseline_period[1],
model_name, adj_r2, site))
if projection_period:
# Display projection plots
num_plot = 2
for i in range(0, len(projection_period), 2):
ax = fig.add_subplot(nrows, 1, num_plot)
period = (slice(projection_period[i], projection_period[i+1]))
project_df = pd.DataFrame()
try:
project_df['y_true'] = data.loc[period, output_col]
project_df['y_pred'] = model.predict(data.loc[period, input_col])
# Set all negative values to zero since energy > 0
project_df['y_pred'][project_df['y_pred'] < 0] = 0
project_df.plot(ax=ax, figsize=self.figsize, title='Projection Period ({}-{})'.format(projection_period[i],
projection_period[i+1]))
num_plot += 1
fig.tight_layout()
Plot_Data.count += 1
return fig, project_df['y_true'], project_df['y_pred']
except Exception:
raise TypeError("If projecting into the future, please specify project_ind_col that has data available \
in the future time period requested.")
return fig, None, None | python | def baseline_projection_plot(self, y_true, y_pred,
baseline_period, projection_period,
model_name, adj_r2,
data, input_col, output_col, model,
site):
""" Create baseline and projection plots.
Parameters
----------
y_true : pd.Series()
Actual y values.
y_pred : np.ndarray
Predicted y values.
baseline_period : list(str)
Baseline period.
projection_period : list(str)
Projection periods.
model_name : str
Optimal model's name.
adj_r2 : float
Adjusted R2 score of optimal model.
data : pd.Dataframe()
Data containing real values.
input_col : list(str)
Predictor column(s).
output_col : str
Target column.
model : func
Optimal model.
Returns
-------
(matplotlib.figure, pd.Series, np.ndarray)
Baseline plot figure, plus the projection-period actuals and predictions (None, None when no projection period is given).
"""
# Baseline and projection plots
fig = plt.figure(Plot_Data.count)
# Number of plots to display
if projection_period:
nrows = len(baseline_period) // 2 + len(projection_period) // 2
else:
nrows = len(baseline_period) // 2
# Plot 1 - Baseline
base_df = pd.DataFrame()
base_df['y_true'] = y_true
base_df['y_pred'] = y_pred
ax1 = fig.add_subplot(nrows, 1, 1)
base_df.plot(ax=ax1, figsize=self.figsize,
title='Baseline Period ({}-{}). \nBest Model: {}. \nBaseline Adj R2: {}. \nSite: {}.'.format(baseline_period[0], baseline_period[1],
model_name, adj_r2, site))
if projection_period:
# Display projection plots
num_plot = 2
for i in range(0, len(projection_period), 2):
ax = fig.add_subplot(nrows, 1, num_plot)
period = (slice(projection_period[i], projection_period[i+1]))
project_df = pd.DataFrame()
try:
project_df['y_true'] = data.loc[period, output_col]
project_df['y_pred'] = model.predict(data.loc[period, input_col])
# Set all negative values to zero since energy > 0
project_df['y_pred'][project_df['y_pred'] < 0] = 0
project_df.plot(ax=ax, figsize=self.figsize, title='Projection Period ({}-{})'.format(projection_period[i],
projection_period[i+1]))
num_plot += 1
fig.tight_layout()
Plot_Data.count += 1
return fig, project_df['y_true'], project_df['y_pred']
except Exception:
raise TypeError("If projecting into the future, please specify project_ind_col that has data available \
in the future time period requested.")
return fig, None, None | [
"def",
"baseline_projection_plot",
"(",
"self",
",",
"y_true",
",",
"y_pred",
",",
"baseline_period",
",",
"projection_period",
",",
"model_name",
",",
"adj_r2",
",",
"data",
",",
"input_col",
",",
"output_col",
",",
"model",
",",
"site",
")",
":",
"# Baseline and projection plots",
"fig",
"=",
"plt",
".",
"figure",
"(",
"Plot_Data",
".",
"count",
")",
"# Number of plots to display",
"if",
"projection_period",
":",
"nrows",
"=",
"len",
"(",
"baseline_period",
")",
"+",
"len",
"(",
"projection_period",
")",
"/",
"2",
"else",
":",
"nrows",
"=",
"len",
"(",
"baseline_period",
")",
"/",
"2",
"# Plot 1 - Baseline",
"base_df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"base_df",
"[",
"'y_true'",
"]",
"=",
"y_true",
"base_df",
"[",
"'y_pred'",
"]",
"=",
"y_pred",
"ax1",
"=",
"fig",
".",
"add_subplot",
"(",
"nrows",
",",
"1",
",",
"1",
")",
"base_df",
".",
"plot",
"(",
"ax",
"=",
"ax1",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"title",
"=",
"'Baseline Period ({}-{}). \\nBest Model: {}. \\nBaseline Adj R2: {}. \\nSite: {}.'",
".",
"format",
"(",
"baseline_period",
"[",
"0",
"]",
",",
"baseline_period",
"[",
"1",
"]",
",",
"model_name",
",",
"adj_r2",
",",
"site",
")",
")",
"if",
"projection_period",
":",
"# Display projection plots",
"num_plot",
"=",
"2",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"projection_period",
")",
",",
"2",
")",
":",
"ax",
"=",
"fig",
".",
"add_subplot",
"(",
"nrows",
",",
"1",
",",
"num_plot",
")",
"period",
"=",
"(",
"slice",
"(",
"projection_period",
"[",
"i",
"]",
",",
"projection_period",
"[",
"i",
"+",
"1",
"]",
")",
")",
"project_df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"try",
":",
"project_df",
"[",
"'y_true'",
"]",
"=",
"data",
".",
"loc",
"[",
"period",
",",
"output_col",
"]",
"project_df",
"[",
"'y_pred'",
"]",
"=",
"model",
".",
"predict",
"(",
"data",
".",
"loc",
"[",
"period",
",",
"input_col",
"]",
")",
"# Set all negative values to zero since energy > 0",
"project_df",
"[",
"'y_pred'",
"]",
"[",
"project_df",
"[",
"'y_pred'",
"]",
"<",
"0",
"]",
"=",
"0",
"project_df",
".",
"plot",
"(",
"ax",
"=",
"ax",
",",
"figsize",
"=",
"self",
".",
"figsize",
",",
"title",
"=",
"'Projection Period ({}-{})'",
".",
"format",
"(",
"projection_period",
"[",
"i",
"]",
",",
"projection_period",
"[",
"i",
"+",
"1",
"]",
")",
")",
"num_plot",
"+=",
"1",
"fig",
".",
"tight_layout",
"(",
")",
"Plot_Data",
".",
"count",
"+=",
"1",
"return",
"fig",
",",
"project_df",
"[",
"'y_true'",
"]",
",",
"project_df",
"[",
"'y_pred'",
"]",
"except",
":",
"raise",
"TypeError",
"(",
"\"If projecting into the future, please specify project_ind_col that has data available \\\n in the future time period requested.\"",
")",
"return",
"fig",
",",
"None",
",",
"None"
] | Create baseline and projection plots.
Parameters
----------
y_true : pd.Series()
Actual y values.
y_pred : np.ndarray
Predicted y values.
baseline_period : list(str)
Baseline period.
projection_period : list(str)
Projection periods.
model_name : str
Optimal model's name.
adj_r2 : float
Adjusted R2 score of optimal model.
data : pd.Dataframe()
Data containing real values.
input_col : list(str)
Predictor column(s).
output_col : str
Target column.
model : func
Optimal model.
Returns
-------
(matplotlib.figure, pd.Series, np.ndarray)
Baseline plot figure, plus the projection-period actuals and predictions (None, None when no projection period is given). | [
"Create",
"baseline",
"and",
"projection",
"plots",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Plot_Data.py#L66-L147 | train | 234,930 |
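A hedged usage sketch: plot_data stands for a Plot_Data instance (its constructor is not shown here), df for the full dataset, fitted_model for a fitted sklearn model, and the site string is a placeholder:

fig, y_true_proj, y_pred_proj = plot_data.baseline_projection_plot(
    y_true, y_pred,                                  # baseline actuals / predictions
    baseline_period=['2018-01-01', '2018-06-30'],
    projection_period=['2018-07-01', '2018-12-31'],
    model_name='Linear Regression', adj_r2=0.82,
    data=df, input_col=['OAT'], output_col='energy',
    model=fitted_model, site='example-site')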
SoftwareDefinedBuildings/XBOS | apps/system_identification/rtu_energy.py | get_thermostat_meter_data | def get_thermostat_meter_data(zone):
"""
This method subscribes to the output of the meter for the given zone.
It returns a handler to call when you want to stop subscribing data, which
returns a list of the data readins over that time period
"""
meter_uri = zone2meter.get(zone, "None")
data = []
def cb(msg):
for po in msg.payload_objects:
if po.type_dotted == (2,0,9,1):
m = msgpack.unpackb(po.content)
data.append(m['current_demand'])
handle = c.subscribe(meter_uri+"/signal/meter", cb)
def stop():
c.unsubscribe(handle)
return data
return stop | python | def get_thermostat_meter_data(zone):
"""
This method subscribes to the output of the meter for the given zone.
It returns a handler to call when you want to stop subscribing data, which
returns a list of the data readings over that time period
"""
meter_uri = zone2meter.get(zone, "None")
data = []
def cb(msg):
for po in msg.payload_objects:
if po.type_dotted == (2,0,9,1):
m = msgpack.unpackb(po.content)
data.append(m['current_demand'])
handle = c.subscribe(meter_uri+"/signal/meter", cb)
def stop():
c.unsubscribe(handle)
return data
return stop | [
"def",
"get_thermostat_meter_data",
"(",
"zone",
")",
":",
"meter_uri",
"=",
"zone2meter",
".",
"get",
"(",
"zone",
",",
"\"None\"",
")",
"data",
"=",
"[",
"]",
"def",
"cb",
"(",
"msg",
")",
":",
"for",
"po",
"in",
"msg",
".",
"payload_objects",
":",
"if",
"po",
".",
"type_dotted",
"==",
"(",
"2",
",",
"0",
",",
"9",
",",
"1",
")",
":",
"m",
"=",
"msgpack",
".",
"unpackb",
"(",
"po",
".",
"content",
")",
"data",
".",
"append",
"(",
"m",
"[",
"'current_demand'",
"]",
")",
"handle",
"=",
"c",
".",
"subscribe",
"(",
"meter_uri",
"+",
"\"/signal/meter\"",
",",
"cb",
")",
"def",
"stop",
"(",
")",
":",
"c",
".",
"unsubscribe",
"(",
"handle",
")",
"return",
"data",
"return",
"stop"
] | This method subscribes to the output of the meter for the given zone.
It returns a handler to call when you want to stop subscribing data, which
returns a list of the data readings over that time period | [
"This",
"method",
"subscribes",
"to",
"the",
"output",
"of",
"the",
"meter",
"for",
"the",
"given",
"zone",
".",
"It",
"returns",
"a",
"handler",
"to",
"call",
"when",
"you",
"want",
"to",
"stop",
"subscribing",
"data",
"which",
"returns",
"a",
"list",
"of",
"the",
"data",
"readins",
"over",
"that",
"time",
"period"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/system_identification/rtu_energy.py#L53-L71 | train | 234,931 |
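Usage sketch; the zone name is hypothetical, and zone2meter plus the subscription client c are module globals set outside this record, so this only runs inside that module:

import time

stop = get_thermostat_meter_data('HVAC_Zone_Example')  # hypothetical zone key
time.sleep(60)       # let demand readings accumulate
readings = stop()    # unsubscribe and collect the values seen so far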
SoftwareDefinedBuildings/XBOS | apps/system_identification/rtu_energy.py | call_heat | def call_heat(tstat):
"""
Adjusts the temperature setpoints in order to call for heating. Returns
a handler to call when you want to reset the thermostat
"""
current_hsp, current_csp = tstat.heating_setpoint, tstat.cooling_setpoint
current_temp = tstat.temperature
tstat.write({
'heating_setpoint': current_temp+10,
'cooling_setpoint': current_temp+20,
'mode': HEAT,
})
def restore():
tstat.write({
'heating_setpoint': current_hsp,
'cooling_setpoint': current_csp,
'mode': AUTO,
})
return restore | python | def call_heat(tstat):
"""
Adjusts the temperature setpoints in order to call for heating. Returns
a handler to call when you want to reset the thermostat
"""
current_hsp, current_csp = tstat.heating_setpoint, tstat.cooling_setpoint
current_temp = tstat.temperature
tstat.write({
'heating_setpoint': current_temp+10,
'cooling_setpoint': current_temp+20,
'mode': HEAT,
})
def restore():
tstat.write({
'heating_setpoint': current_hsp,
'cooling_setpoint': current_csp,
'mode': AUTO,
})
return restore | [
"def",
"call_heat",
"(",
"tstat",
")",
":",
"current_hsp",
",",
"current_csp",
"=",
"tstat",
".",
"heating_setpoint",
",",
"tstat",
".",
"cooling_setpoint",
"current_temp",
"=",
"tstat",
".",
"temperature",
"tstat",
".",
"write",
"(",
"{",
"'heating_setpoint'",
":",
"current_temp",
"+",
"10",
",",
"'cooling_setpoint'",
":",
"current_temp",
"+",
"20",
",",
"'mode'",
":",
"HEAT",
",",
"}",
")",
"def",
"restore",
"(",
")",
":",
"tstat",
".",
"write",
"(",
"{",
"'heating_setpoint'",
":",
"current_hsp",
",",
"'cooling_setpoint'",
":",
"current_csp",
",",
"'mode'",
":",
"AUTO",
",",
"}",
")",
"return",
"restore"
] | Adjusts the temperature setpoints in order to call for heating. Returns
a handler to call when you want to reset the thermostat | [
"Adjusts",
"the",
"temperature",
"setpoints",
"in",
"order",
"to",
"call",
"for",
"heating",
".",
"Returns",
"a",
"handler",
"to",
"call",
"when",
"you",
"want",
"to",
"reset",
"the",
"thermostat"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/system_identification/rtu_energy.py#L73-L92 | train | 234,932 |
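Usage sketch for the restore-handler pattern, which call_cool and call_fan below follow as well; tstat stands for a thermostat driver object exposing the attributes the record reads:

import time

restore = call_heat(tstat)  # force a heating call by raising both setpoints
time.sleep(300)             # hold the call for five minutes
restore()                   # put the original setpoints and mode back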
SoftwareDefinedBuildings/XBOS | apps/system_identification/rtu_energy.py | call_cool | def call_cool(tstat):
"""
Adjusts the temperature setpoints in order to call for cooling. Returns
a handler to call when you want to reset the thermostat
"""
current_hsp, current_csp = tstat.heating_setpoint, tstat.cooling_setpoint
current_temp = tstat.temperature
tstat.write({
'heating_setpoint': current_temp-20,
'cooling_setpoint': current_temp-10,
'mode': COOL,
})
def restore():
tstat.write({
'heating_setpoint': current_hsp,
'cooling_setpoint': current_csp,
'mode': AUTO,
})
return restore | python | def call_cool(tstat):
"""
Adjusts the temperature setpoints in order to call for cooling. Returns
a handler to call when you want to reset the thermostat
"""
current_hsp, current_csp = tstat.heating_setpoint, tstat.cooling_setpoint
current_temp = tstat.temperature
tstat.write({
'heating_setpoint': current_temp-20,
'cooling_setpoint': current_temp-10,
'mode': COOL,
})
def restore():
tstat.write({
'heating_setpoint': current_hsp,
'cooling_setpoint': current_csp,
'mode': AUTO,
})
return restore | [
"def",
"call_cool",
"(",
"tstat",
")",
":",
"current_hsp",
",",
"current_csp",
"=",
"tstat",
".",
"heating_setpoint",
",",
"tstat",
".",
"cooling_setpoint",
"current_temp",
"=",
"tstat",
".",
"temperature",
"tstat",
".",
"write",
"(",
"{",
"'heating_setpoint'",
":",
"current_temp",
"-",
"20",
",",
"'cooling_setpoint'",
":",
"current_temp",
"-",
"10",
",",
"'mode'",
":",
"COOL",
",",
"}",
")",
"def",
"restore",
"(",
")",
":",
"tstat",
".",
"write",
"(",
"{",
"'heating_setpoint'",
":",
"current_hsp",
",",
"'cooling_setpoint'",
":",
"current_csp",
",",
"'mode'",
":",
"AUTO",
",",
"}",
")",
"return",
"restore"
] | Adjusts the temperature setpoints in order to call for cooling. Returns
a handler to call when you want to reset the thermostat | [
"Adjusts",
"the",
"temperature",
"setpoints",
"in",
"order",
"to",
"call",
"for",
"cooling",
".",
"Returns",
"a",
"handler",
"to",
"call",
"when",
"you",
"want",
"to",
"reset",
"the",
"thermostat"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/system_identification/rtu_energy.py#L94-L113 | train | 234,933 |
SoftwareDefinedBuildings/XBOS | apps/system_identification/rtu_energy.py | call_fan | def call_fan(tstat):
"""
Toggles the fan
"""
old_fan = tstat.fan
tstat.write({
'fan': not old_fan,
})
def restore():
tstat.write({
'fan': old_fan,
})
return restore | python | def call_fan(tstat):
"""
Toggles the fan
"""
old_fan = tstat.fan
tstat.write({
'fan': not old_fan,
})
def restore():
tstat.write({
'fan': old_fan,
})
return restore | [
"def",
"call_fan",
"(",
"tstat",
")",
":",
"old_fan",
"=",
"tstat",
".",
"fan",
"tstat",
".",
"write",
"(",
"{",
"'fan'",
":",
"not",
"old_fan",
",",
"}",
")",
"def",
"restore",
"(",
")",
":",
"tstat",
".",
"write",
"(",
"{",
"'fan'",
":",
"old_fan",
",",
"}",
")",
"return",
"restore"
] | Toggles the fan | [
"Toggles",
"the",
"fan"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/system_identification/rtu_energy.py#L115-L129 | train | 234,934 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_Data._load_csv | def _load_csv(self, file_name, folder_name, head_row, index_col, convert_col, concat_files):
""" Load single csv file.
Parameters
----------
file_name : str
CSV file to be imported. Defaults to '*' - all csv files in the folder.
folder_name : str
Folder where file resides. Defaults to '.' - current directory.
head_row : int
Skips all rows from 0 to head_row-1
index_col : int
Column to use as the row index (passed to pandas read_csv index_col).
convert_col : bool
Convert columns to numeric type
concat_files : bool
Appends data from files to result dataframe
Returns
-------
pd.DataFrame()
Dataframe containing csv data
"""
# Denotes all csv files
if file_name == "*":
if not os.path.isdir(folder_name):
raise OSError('Folder does not exist.')
else:
file_name_list = sorted(glob.glob(folder_name + '*.csv'))
if not file_name_list:
raise OSError('Either the folder does not contain any csv files or invalid folder provided.')
else:
# Call previous function again with parameters changed (file_name=file_name_list, folder_name=None)
# Done to reduce redundancy of code
self.import_csv(file_name=file_name_list, head_row=head_row, index_col=index_col,
convert_col=convert_col, concat_files=concat_files)
return self.data
else:
if not os.path.isdir(folder_name):
raise OSError('Folder does not exist.')
else:
path = os.path.join(folder_name, file_name)
if head_row > 0:
data = pd.read_csv(path, index_col=index_col, skiprows=[i for i in range(head_row-1)])
else:
data = pd.read_csv(path, index_col=index_col)
# Convert time into datetime format
try:
# Special case format 1/4/14 21:30
data.index = pd.to_datetime(data.index, format='%m/%d/%y %H:%M')
except Exception:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format=True)
# Convert all columns to numeric type
if convert_col:
# Check columns in dataframe to see if they are numeric
for col in data.columns:
# If particular column is not numeric, then convert to numeric type
if not np.issubdtype(data[col].dtype, np.number):
data[col] = pd.to_numeric(data[col], errors="coerce")
return data | python | def _load_csv(self, file_name, folder_name, head_row, index_col, convert_col, concat_files):
""" Load single csv file.
Parameters
----------
file_name : str
CSV file to be imported. Defaults to '*' - all csv files in the folder.
folder_name : str
Folder where file resides. Defaults to '.' - current directory.
head_row : int
Skips all rows from 0 to head_row-1
index_col : int
Column to use as the row index (passed to pandas read_csv index_col).
convert_col : bool
Convert columns to numeric type
concat_files : bool
Appends data from files to result dataframe
Returns
-------
pd.DataFrame()
Dataframe containing csv data
"""
# Denotes all csv files
if file_name == "*":
if not os.path.isdir(folder_name):
raise OSError('Folder does not exist.')
else:
file_name_list = sorted(glob.glob(folder_name + '*.csv'))
if not file_name_list:
raise OSError('Either the folder does not contain any csv files or invalid folder provided.')
else:
# Call previous function again with parameters changed (file_name=file_name_list, folder_name=None)
# Done to reduce redundancy of code
self.import_csv(file_name=file_name_list, head_row=head_row, index_col=index_col,
convert_col=convert_col, concat_files=concat_files)
return self.data
else:
if not os.path.isdir(folder_name):
raise OSError('Folder does not exist.')
else:
path = os.path.join(folder_name, file_name)
if head_row > 0:
data = pd.read_csv(path, index_col=index_col, skiprows=[i for i in range(head_row-1)])
else:
data = pd.read_csv(path, index_col=index_col)
# Convert time into datetime format
try:
# Special case format 1/4/14 21:30
data.index = pd.to_datetime(data.index, format='%m/%d/%y %H:%M')
except Exception:
data.index = pd.to_datetime(data.index, dayfirst=False, infer_datetime_format=True)
# Convert all columns to numeric type
if convert_col:
# Check columns in dataframe to see if they are numeric
for col in data.columns:
# If particular column is not numeric, then convert to numeric type
if not np.issubdtype(data[col].dtype, np.number):
data[col] = pd.to_numeric(data[col], errors="coerce")
return data | [
"def",
"_load_csv",
"(",
"self",
",",
"file_name",
",",
"folder_name",
",",
"head_row",
",",
"index_col",
",",
"convert_col",
",",
"concat_files",
")",
":",
"# Denotes all csv files",
"if",
"file_name",
"==",
"\"*\"",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder_name",
")",
":",
"raise",
"OSError",
"(",
"'Folder does not exist.'",
")",
"else",
":",
"file_name_list",
"=",
"sorted",
"(",
"glob",
".",
"glob",
"(",
"folder_name",
"+",
"'*.csv'",
")",
")",
"if",
"not",
"file_name_list",
":",
"raise",
"OSError",
"(",
"'Either the folder does not contain any csv files or invalid folder provided.'",
")",
"else",
":",
"# Call previous function again with parameters changed (file_name=file_name_list, folder_name=None)",
"# Done to reduce redundancy of code",
"self",
".",
"import_csv",
"(",
"file_name",
"=",
"file_name_list",
",",
"head_row",
"=",
"head_row",
",",
"index_col",
"=",
"index_col",
",",
"convert_col",
"=",
"convert_col",
",",
"concat_files",
"=",
"concat_files",
")",
"return",
"self",
".",
"data",
"else",
":",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"folder_name",
")",
":",
"raise",
"OSError",
"(",
"'Folder does not exist.'",
")",
"else",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"folder_name",
",",
"file_name",
")",
"if",
"head_row",
">",
"0",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"path",
",",
"index_col",
"=",
"index_col",
",",
"skiprows",
"=",
"[",
"i",
"for",
"i",
"in",
"range",
"(",
"head_row",
"-",
"1",
")",
"]",
")",
"else",
":",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"path",
",",
"index_col",
"=",
"index_col",
")",
"# Convert time into datetime format",
"try",
":",
"# Special case format 1/4/14 21:30",
"data",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"data",
".",
"index",
",",
"format",
"=",
"'%m/%d/%y %H:%M'",
")",
"except",
":",
"data",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"data",
".",
"index",
",",
"dayfirst",
"=",
"False",
",",
"infer_datetime_format",
"=",
"True",
")",
"# Convert all columns to numeric type",
"if",
"convert_col",
":",
"# Check columns in dataframe to see if they are numeric",
"for",
"col",
"in",
"data",
".",
"columns",
":",
"# If particular column is not numeric, then convert to numeric type",
"if",
"data",
"[",
"col",
"]",
".",
"dtype",
"!=",
"np",
".",
"number",
":",
"data",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"data",
"[",
"col",
"]",
",",
"errors",
"=",
"\"coerce\"",
")",
"return",
"data"
] | Load single csv file.
Parameters
----------
file_name : str
CSV file to be imported. Defaults to '*' - all csv files in the folder.
folder_name : str
Folder where file resides. Defaults to '.' - current directory.
head_row : int
Skips all rows from 0 to head_row-1
index_col : int
Column to use as the row index (passed to pandas read_csv index_col).
convert_col : bool
Convert columns to numeric type
concat_files : bool
Appends data from files to result dataframe
Returns
-------
pd.DataFrame()
Dataframe containing csv data | [
"Load",
"single",
"csv",
"file",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L106-L174 | train | 234,935 |
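Usage sketch, assuming Import_Data() takes no required constructor arguments (the constructor is not part of this record) and that ./data/meter.csv exists:

imp = Import_Data()
df = imp._load_csv(file_name='meter.csv', folder_name='./data/',
                   head_row=0, index_col=0, convert_col=True,
                   concat_files=False)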
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.convert_to_utc | def convert_to_utc(time):
""" Convert time to UTC
Parameters
----------
time : str
Time to convert. Has to be of the format '2016-01-01T00:00:00-08:00'.
Returns
-------
str
UTC timestamp.
"""
# time is already in UTC
if 'Z' in time:
return time
else:
time_formatted = time[:-3] + time[-2:]
dt = datetime.strptime(time_formatted, '%Y-%m-%dT%H:%M:%S%z')
dt = dt.astimezone(timezone('UTC'))
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') | python | def convert_to_utc(time):
""" Convert time to UTC
Parameters
----------
time : str
Time to convert. Has to be of the format '2016-01-01T00:00:00-08:00'.
Returns
-------
str
UTC timestamp.
"""
# time is already in UTC
if 'Z' in time:
return time
else:
time_formatted = time[:-3] + time[-2:]
dt = datetime.strptime(time_formatted, '%Y-%m-%dT%H:%M:%S%z')
dt = dt.astimezone(timezone('UTC'))
return dt.strftime('%Y-%m-%dT%H:%M:%SZ') | [
"def",
"convert_to_utc",
"(",
"time",
")",
":",
"# time is already in UTC",
"if",
"'Z'",
"in",
"time",
":",
"return",
"time",
"else",
":",
"time_formatted",
"=",
"time",
"[",
":",
"-",
"3",
"]",
"+",
"time",
"[",
"-",
"2",
":",
"]",
"dt",
"=",
"datetime",
".",
"strptime",
"(",
"time_formatted",
",",
"'%Y-%m-%dT%H:%M:%S%z'",
")",
"dt",
"=",
"dt",
".",
"astimezone",
"(",
"timezone",
"(",
"'UTC'",
")",
")",
"return",
"dt",
".",
"strftime",
"(",
"'%Y-%m-%dT%H:%M:%SZ'",
")"
] | Convert time to UTC
Parameters
----------
time : str
Time to convert. Has to be of the format '2016-01-01T00:00:00-08:00'.
Returns
-------
str
UTC timestamp. | [
"Convert",
"time",
"to",
"UTC"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L190-L212 | train | 234,936 |
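convert_to_utc() takes no self, so it reads as a staticmethod; a quick check of the offset arithmetic under that assumption:

print(Import_MDAL.convert_to_utc('2018-01-01T00:00:00-08:00'))
# -> '2018-01-01T08:00:00Z'
print(Import_MDAL.convert_to_utc('2018-01-01T08:00:00Z'))
# -> returned unchanged: already UTC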
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.get_meter | def get_meter(self, site, start, end, point_type='Green_Button_Meter',
var="meter", agg='MEAN', window='24h', aligned=True, return_names=True):
""" Get meter data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, e.g. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
Align the timestamps of the returned streams (passed through to the MDAL query).
return_names : bool
Replace stream UUIDs with human-readable point names in the result.
Returns
-------
(df, mapping, context)
MDAL query result: dataframe, UUID-to-name mapping, and query context.
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
request = self.compose_MDAL_dic(point_type=point_type, site=site, start=start, end=end,
var=var, agg=agg, window=window, aligned=aligned)
resp = self.m.query(request)
if return_names:
resp = self.replace_uuid_w_names(resp)
return resp | python | def get_meter(self, site, start, end, point_type='Green_Button_Meter',
var="meter", agg='MEAN', window='24h', aligned=True, return_names=True):
""" Get meter data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, e.g. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
Align the timestamps of the returned streams (passed through to the MDAL query).
return_names : bool
Replace stream UUIDs with human-readable point names in the result.
Returns
-------
(df, mapping, context)
MDAL query result: dataframe, UUID-to-name mapping, and query context.
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
request = self.compose_MDAL_dic(point_type=point_type, site=site, start=start, end=end,
var=var, agg=agg, window=window, aligned=aligned)
resp = self.m.query(request)
if return_names:
resp = self.replace_uuid_w_names(resp)
return resp | [
"def",
"get_meter",
"(",
"self",
",",
"site",
",",
"start",
",",
"end",
",",
"point_type",
"=",
"'Green_Button_Meter'",
",",
"var",
"=",
"\"meter\"",
",",
"agg",
"=",
"'MEAN'",
",",
"window",
"=",
"'24h'",
",",
"aligned",
"=",
"True",
",",
"return_names",
"=",
"True",
")",
":",
"# Convert time to UTC",
"start",
"=",
"self",
".",
"convert_to_utc",
"(",
"start",
")",
"end",
"=",
"self",
".",
"convert_to_utc",
"(",
"end",
")",
"request",
"=",
"self",
".",
"compose_MDAL_dic",
"(",
"point_type",
"=",
"point_type",
",",
"site",
"=",
"site",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"var",
"=",
"var",
",",
"agg",
"=",
"agg",
",",
"window",
"=",
"window",
",",
"aligned",
"=",
"aligned",
")",
"resp",
"=",
"self",
".",
"m",
".",
"query",
"(",
"request",
")",
"if",
"return_names",
":",
"resp",
"=",
"self",
".",
"replace_uuid_w_names",
"(",
"resp",
")",
"return",
"resp"
] | Get meter data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, i.e. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
(df, mapping, context)
??? | [
"Get",
"meter",
"data",
"from",
"MDAL",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L215-L258 | train | 234,937 |
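A hypothetical usage sketch for get_meter; the construction of the Import_MDAL client (and the MDAL archiver behind self.m) is assumed rather than shown in this record, and the site name is a placeholder:

imp = Import_MDAL()  # constructor arguments, if any, are assumptions
resp = imp.get_meter(site='some-site',
                     start='2018-01-01T00:00:00-08:00',
                     end='2018-02-01T00:00:00-08:00',
                     point_type='Green_Button_Meter',
                     agg='MEAN', window='1h')
print(resp.df.head())  # with return_names=True, columns carry point names, not uuids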
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.get_tstat | def get_tstat(self, site, start, end, var="tstat_temp", agg='MEAN', window='24h', aligned=True, return_names=True):
""" Get thermostat data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
(df, mapping, context)
???
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
point_map = {
"tstat_state" : "Thermostat_Status",
"tstat_hsp" : "Supply_Air_Temperature_Heating_Setpoint",
"tstat_csp" : "Supply_Air_Temperature_Cooling_Setpoint",
"tstat_temp": "Temperature_Sensor"
}
if isinstance(var,list):
point_type = [point_map[point_type] for point_type in var] # list of all the point names using BRICK classes
else:
point_type = point_map[var] # single value using BRICK classes
request = self.compose_MDAL_dic(point_type=point_type, site=site, start=start, end=end,
var=var, agg=agg, window=window, aligned=aligned)
resp = self.m.query(request)
if return_names:
resp = self.replace_uuid_w_names(resp)
return resp | python | def get_tstat(self, site, start, end, var="tstat_temp", agg='MEAN', window='24h', aligned=True, return_names=True):
""" Get thermostat data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
(df, mapping, context)
???
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
point_map = {
"tstat_state" : "Thermostat_Status",
"tstat_hsp" : "Supply_Air_Temperature_Heating_Setpoint",
"tstat_csp" : "Supply_Air_Temperature_Cooling_Setpoint",
"tstat_temp": "Temperature_Sensor"
}
if isinstance(var,list):
point_type = [point_map[point_type] for point_type in var] # list of all the point names using BRICK classes
else:
point_type = point_map[var] # single value using BRICK classes
request = self.compose_MDAL_dic(point_type=point_type, site=site, start=start, end=end,
var=var, agg=agg, window=window, aligned=aligned)
resp = self.m.query(request)
if return_names:
resp = self.replace_uuid_w_names(resp)
return resp | [
"def",
"get_tstat",
"(",
"self",
",",
"site",
",",
"start",
",",
"end",
",",
"var",
"=",
"\"tstat_temp\"",
",",
"agg",
"=",
"'MEAN'",
",",
"window",
"=",
"'24h'",
",",
"aligned",
"=",
"True",
",",
"return_names",
"=",
"True",
")",
":",
"# Convert time to UTC",
"start",
"=",
"self",
".",
"convert_to_utc",
"(",
"start",
")",
"end",
"=",
"self",
".",
"convert_to_utc",
"(",
"end",
")",
"point_map",
"=",
"{",
"\"tstat_state\"",
":",
"\"Thermostat_Status\"",
",",
"\"tstat_hsp\"",
":",
"\"Supply_Air_Temperature_Heating_Setpoint\"",
",",
"\"tstat_csp\"",
":",
"\"Supply_Air_Temperature_Cooling_Setpoint\"",
",",
"\"tstat_temp\"",
":",
"\"Temperature_Sensor\"",
"}",
"if",
"isinstance",
"(",
"var",
",",
"list",
")",
":",
"point_type",
"=",
"[",
"point_map",
"[",
"point_type",
"]",
"for",
"point_type",
"in",
"var",
"]",
"# list of all the point names using BRICK classes",
"else",
":",
"point_type",
"=",
"point_map",
"[",
"var",
"]",
"# single value using BRICK classes",
"request",
"=",
"self",
".",
"compose_MDAL_dic",
"(",
"point_type",
"=",
"point_type",
",",
"site",
"=",
"site",
",",
"start",
"=",
"start",
",",
"end",
"=",
"end",
",",
"var",
"=",
"var",
",",
"agg",
"=",
"agg",
",",
"window",
"=",
"window",
",",
"aligned",
"=",
"aligned",
")",
"resp",
"=",
"self",
".",
"m",
".",
"query",
"(",
"request",
")",
"if",
"return_names",
":",
"resp",
"=",
"self",
".",
"replace_uuid_w_names",
"(",
"resp",
")",
"return",
"resp"
] | Get thermostat data from MDAL.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
(df, mapping, context)
??? | [
"Get",
"thermostat",
"data",
"from",
"MDAL",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L307-L359 | train | 234,938 |
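Since var and agg may be passed as parallel lists (handled by compose_MDAL_dic in the next record), several thermostat streams can be pulled in one call. A sketch reusing the hypothetical imp client from the previous note:

resp = imp.get_tstat(site='some-site',
                     start='2018-06-01T00:00:00-07:00',
                     end='2018-06-08T00:00:00-07:00',
                     var=['tstat_temp', 'tstat_hsp'],  # maps to Temperature_Sensor and the heating setpoint class
                     agg=['MEAN', 'MAX'],
                     window='15m')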
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.compose_MDAL_dic | def compose_MDAL_dic(self, site, point_type,
start, end, var, agg, window, aligned, points=None, return_names=False):
""" Create dictionary for MDAL request.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, i.e. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
dict
MDAL request dictionary composed by this method.
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
request = {}
# Add Time Details - single set for one or multiple series
request['Time'] = {
'Start': start,
'End': end,
'Window': window,
'Aligned': aligned
}
# Define Variables
request["Variables"] = {}
request['Composition'] = []
request['Aggregation'] = {}
if isinstance(point_type, str): # if point_type is a string -> single type of point requested
request["Variables"][var] = self.compose_BRICK_query(point_type=point_type,site=site) # pass one point type at the time
request['Composition'] = [var]
request['Aggregation'][var] = [agg]
elif isinstance(point_type, list): # loop through all the point_types and create one section of the brick query at the time
for idx, point in enumerate(point_type):
request["Variables"][var[idx]] = self.compose_BRICK_query(point_type=point,site=site) # pass one point type at the time
request['Composition'].append(var[idx])
if isinstance(agg, str): # if agg is a string -> single type of aggregation requested
request['Aggregation'][var[idx]] = [agg]
elif isinstance(agg, list): # if agg is a list -> expected one agg per point
request['Aggregation'][var[idx]] = [agg[idx]]
return request | python | def compose_MDAL_dic(self, site, point_type,
start, end, var, agg, window, aligned, points=None, return_names=False):
""" Create dictionary for MDAL request.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, i.e. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
dict
MDAL request dictionary composed by this method.
"""
# Convert time to UTC
start = self.convert_to_utc(start)
end = self.convert_to_utc(end)
request = {}
# Add Time Details - single set for one or multiple series
request['Time'] = {
'Start': start,
'End': end,
'Window': window,
'Aligned': aligned
}
# Define Variables
request["Variables"] = {}
request['Composition'] = []
request['Aggregation'] = {}
if isinstance(point_type, str): # if point_type is a string -> single type of point requested
request["Variables"][var] = self.compose_BRICK_query(point_type=point_type,site=site) # pass one point type at the time
request['Composition'] = [var]
request['Aggregation'][var] = [agg]
elif isinstance(point_type, list): # loop through all the point_types and create one section of the brick query at the time
for idx, point in enumerate(point_type):
request["Variables"][var[idx]] = self.compose_BRICK_query(point_type=point,site=site) # pass one point type at the time
request['Composition'].append(var[idx])
if isinstance(agg, str): # if agg is a string -> single type of aggregation requested
request['Aggregation'][var[idx]] = [agg]
elif isinstance(agg, list): # if agg is a list -> expected one agg per point
request['Aggregation'][var[idx]] = [agg[idx]]
return request | [
"def",
"compose_MDAL_dic",
"(",
"self",
",",
"site",
",",
"point_type",
",",
"start",
",",
"end",
",",
"var",
",",
"agg",
",",
"window",
",",
"aligned",
",",
"points",
"=",
"None",
",",
"return_names",
"=",
"False",
")",
":",
"# Convert time to UTC",
"start",
"=",
"self",
".",
"convert_to_utc",
"(",
"start",
")",
"end",
"=",
"self",
".",
"convert_to_utc",
"(",
"end",
")",
"request",
"=",
"{",
"}",
"# Add Time Details - single set for one or multiple series",
"request",
"[",
"'Time'",
"]",
"=",
"{",
"'Start'",
":",
"start",
",",
"'End'",
":",
"end",
",",
"'Window'",
":",
"window",
",",
"'Aligned'",
":",
"aligned",
"}",
"# Define Variables ",
"request",
"[",
"\"Variables\"",
"]",
"=",
"{",
"}",
"request",
"[",
"'Composition'",
"]",
"=",
"[",
"]",
"request",
"[",
"'Aggregation'",
"]",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"point_type",
",",
"str",
")",
":",
"# if point_type is a string -> single type of point requested",
"request",
"[",
"\"Variables\"",
"]",
"[",
"var",
"]",
"=",
"self",
".",
"compose_BRICK_query",
"(",
"point_type",
"=",
"point_type",
",",
"site",
"=",
"site",
")",
"# pass one point type at the time",
"request",
"[",
"'Composition'",
"]",
"=",
"[",
"var",
"]",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"]",
"=",
"[",
"agg",
"]",
"elif",
"isinstance",
"(",
"point_type",
",",
"list",
")",
":",
"# loop through all the point_types and create one section of the brick query at the time",
"for",
"idx",
",",
"point",
"in",
"enumerate",
"(",
"point_type",
")",
":",
"request",
"[",
"\"Variables\"",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"self",
".",
"compose_BRICK_query",
"(",
"point_type",
"=",
"point",
",",
"site",
"=",
"site",
")",
"# pass one point type at the time",
"request",
"[",
"'Composition'",
"]",
".",
"append",
"(",
"var",
"[",
"idx",
"]",
")",
"if",
"isinstance",
"(",
"agg",
",",
"str",
")",
":",
"# if agg is a string -> single type of aggregation requested",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"[",
"agg",
"]",
"elif",
"isinstance",
"(",
"agg",
",",
"list",
")",
":",
"# if agg is a list -> expected one agg per point",
"request",
"[",
"'Aggregation'",
"]",
"[",
"var",
"[",
"idx",
"]",
"]",
"=",
"[",
"agg",
"[",
"idx",
"]",
"]",
"return",
"request"
] | Create dictionary for MDAL request.
Parameters
----------
site : str
Building name.
start : str
Start date - 'YYYY-MM-DDTHH:MM:SSZ'
end : str
End date - 'YYYY-MM-DDTHH:MM:SSZ'
point_type : str
Type of data, i.e. Green_Button_Meter, Building_Electric_Meter...
var : str
Variable - "meter", "weather"...
agg : str
Aggregation - MEAN, SUM, RAW...
window : str
Size of the moving window.
aligned : bool
???
return_names : bool
???
Returns
-------
dict
MDAL request dictionary composed by this method. | [
"Create",
"dictionary",
"for",
"MDAL",
"request",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L362-L428 | train | 234,939 |
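For a single point type, the method above assembles a request dictionary of the following shape (values illustrative; the 'Variables' entry holds whatever compose_BRICK_query returns):

request = {
    'Time': {'Start': '2018-01-01T08:00:00Z',
             'End': '2018-02-01T08:00:00Z',
             'Window': '24h',
             'Aligned': True},
    'Variables': {'meter': '<BRICK query dict from compose_BRICK_query>'},
    'Composition': ['meter'],
    'Aggregation': {'meter': ['MEAN']},
}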
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.get_point_name | def get_point_name(self, context):
""" Get point name.
Parameters
----------
context : ???
???
Returns
-------
???
???
"""
metadata_table = self.parse_context(context)
return metadata_table.apply(self.strip_point_name, axis=1) | python | def get_point_name(self, context):
""" Get point name.
Parameters
----------
context : ???
???
Returns
-------
???
???
"""
metadata_table = self.parse_context(context)
return metadata_table.apply(self.strip_point_name, axis=1) | [
"def",
"get_point_name",
"(",
"self",
",",
"context",
")",
":",
"metadata_table",
"=",
"self",
".",
"parse_context",
"(",
"context",
")",
"return",
"metadata_table",
".",
"apply",
"(",
"self",
".",
"strip_point_name",
",",
"axis",
"=",
"1",
")"
] | Get point name.
Parameters
----------
context : ???
???
Returns
-------
???
??? | [
"Get",
"point",
"name",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L510-L526 | train | 234,940 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Import_Data.py | Import_MDAL.replace_uuid_w_names | def replace_uuid_w_names(self, resp):
""" Replace the uuid's with names.
Parameters
----------
resp : ???
???
Returns
-------
???
???
"""
col_mapper = self.get_point_name(resp.context)["?point"].to_dict()
resp.df.rename(columns=col_mapper, inplace=True)
return resp | python | def replace_uuid_w_names(self, resp):
""" Replace the uuid's with names.
Parameters
----------
resp : ???
???
Returns
-------
???
???
"""
col_mapper = self.get_point_name(resp.context)["?point"].to_dict()
resp.df.rename(columns=col_mapper, inplace=True)
return resp | [
"def",
"replace_uuid_w_names",
"(",
"self",
",",
"resp",
")",
":",
"col_mapper",
"=",
"self",
".",
"get_point_name",
"(",
"resp",
".",
"context",
")",
"[",
"\"?point\"",
"]",
".",
"to_dict",
"(",
")",
"resp",
".",
"df",
".",
"rename",
"(",
"columns",
"=",
"col_mapper",
",",
"inplace",
"=",
"True",
")",
"return",
"resp"
] | Replace the uuids with names.
Parameters
----------
resp : ???
???
Returns
-------
???
??? | [
"Replace",
"the",
"uuid",
"s",
"with",
"names",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Import_Data.py#L529-L546 | train | 234,941 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.resample_data | def resample_data(self, data, freq, resampler='mean'):
""" Resample dataframe.
Note
----
1. Figure out how to apply different functions to different columns .apply()
2. This theoretically works for upsampling too; check the docs
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
Parameters
----------
data : pd.DataFrame()
Dataframe to resample
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
Returns
-------
pd.DataFrame()
Dataframe containing resampled data
"""
if resampler == 'mean':
data = data.resample(freq).mean()
elif resampler == 'max':
data = data.resample(freq).max()
else:
raise ValueError('Resampler can be \'mean\' or \'max\' only.')
return data | python | def resample_data(self, data, freq, resampler='mean'):
""" Resample dataframe.
Note
----
1. Figure out how to apply different functions to different columns .apply()
2. This theoretically works for upsampling too; check the docs
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
Parameters
----------
data : pd.DataFrame()
Dataframe to resample
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
Returns
-------
pd.DataFrame()
Dataframe containing resampled data
"""
if resampler == 'mean':
data = data.resample(freq).mean()
elif resampler == 'max':
data = data.resample(freq).max()
else:
raise ValueError('Resampler can be \'mean\' or \'max\' only.')
return data | [
"def",
"resample_data",
"(",
"self",
",",
"data",
",",
"freq",
",",
"resampler",
"=",
"'mean'",
")",
":",
"if",
"resampler",
"==",
"'mean'",
":",
"data",
"=",
"data",
".",
"resample",
"(",
"freq",
")",
".",
"mean",
"(",
")",
"elif",
"resampler",
"==",
"'max'",
":",
"data",
"=",
"data",
".",
"resample",
"(",
"freq",
")",
".",
"max",
"(",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Resampler can be \\'mean\\' or \\'max\\' only.'",
")",
"return",
"data"
] | Resample dataframe.
Note
----
1. Figure out how to apply different functions to different columns .apply()
2. This theoretically works for upsampling too; check the docs
http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.resample.html
Parameters
----------
data : pd.DataFrame()
Dataframe to resample
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
Returns
-------
pd.DataFrame()
Dataframe containing resampled data | [
"Resample",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L76-L108 | train | 234,942 |
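A self-contained illustration of the two supported resampling paths on synthetic data:

import numpy as np
import pandas as pd

idx = pd.date_range('2018-01-01', periods=96, freq='15T')
df = pd.DataFrame({'power': np.random.rand(96)}, index=idx)
hourly = df.resample('h').mean()  # what resample_data(df, 'h', 'mean') computes
daily = df.resample('d').max()    # what resample_data(df, 'd', 'max') computes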
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.interpolate_data | def interpolate_data(self, data, limit, method):
""" Interpolate dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to interpolate
limit : int
Interpolation limit.
method : str
Interpolation method.
Returns
-------
pd.DataFrame()
Dataframe containing interpolated data
"""
data = data.interpolate(how="index", limit=limit, method=method)
return data | python | def interpolate_data(self, data, limit, method):
""" Interpolate dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to interpolate
limit : int
Interpolation limit.
method : str
Interpolation method.
Returns
-------
pd.DataFrame()
Dataframe containing interpolated data
"""
data = data.interpolate(how="index", limit=limit, method=method)
return data | [
"def",
"interpolate_data",
"(",
"self",
",",
"data",
",",
"limit",
",",
"method",
")",
":",
"data",
"=",
"data",
".",
"interpolate",
"(",
"how",
"=",
"\"index\"",
",",
"limit",
"=",
"limit",
",",
"method",
"=",
"method",
")",
"return",
"data"
] | Interpolate dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to interpolate
limit : int
Interpolation limit.
method : str
Interpolation method.
Returns
-------
pd.DataFrame()
Dataframe containing interpolated data | [
"Interpolate",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L111-L130 | train | 234,943 |
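Note that how is not a documented keyword of pandas DataFrame.interpolate (it falls into **kwargs), so the method and limit arguments do the real work in the call above. A minimal sketch of that behavior:

import numpy as np
import pandas as pd

df = pd.DataFrame({'temp': [20.0, np.nan, 22.0, np.nan, np.nan, 25.0]})
filled = df.interpolate(method='linear', limit=1)
# limit=1 fills at most one consecutive NaN, so the two-NaN gap keeps one NaN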
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.remove_na | def remove_na(self, data, remove_na_how):
""" Remove NAs from dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove NAs from.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
Returns
-------
pd.DataFrame()
Dataframe with NAs removed.
"""
data = data.dropna(how=remove_na_how)
return data | python | def remove_na(self, data, remove_na_how):
""" Remove NAs from dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove NAs from.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
Returns
-------
pd.DataFrame()
Dataframe with NAs removed.
"""
data = data.dropna(how=remove_na_how)
return data | [
"def",
"remove_na",
"(",
"self",
",",
"data",
",",
"remove_na_how",
")",
":",
"data",
"=",
"data",
".",
"dropna",
"(",
"how",
"=",
"remove_na_how",
")",
"return",
"data"
] | Remove NAs from dataframe.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove NAs from.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
Returns
-------
pd.DataFrame()
Dataframe with NAs removed. | [
"Remove",
"NAs",
"from",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L133-L150 | train | 234,944 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.remove_outlier | def remove_outlier(self, data, sd_val):
""" Remove outliers from dataframe.
Note
----
1. This function excludes all lines with NA in all columns.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove outliers from.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to be considered an outlier)
Returns
-------
pd.DataFrame()
Dataframe with outliers removed.
"""
data = data.dropna()
data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]
return data | python | def remove_outlier(self, data, sd_val):
""" Remove outliers from dataframe.
Note
----
1. This function excludes all lines with NA in all columns.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove outliers from.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to be considered an outlier)
Returns
-------
pd.DataFrame()
Dataframe with outliers removed.
"""
data = data.dropna()
data = data[(np.abs(stats.zscore(data)) < float(sd_val)).all(axis=1)]
return data | [
"def",
"remove_outlier",
"(",
"self",
",",
"data",
",",
"sd_val",
")",
":",
"data",
"=",
"data",
".",
"dropna",
"(",
")",
"data",
"=",
"data",
"[",
"(",
"np",
".",
"abs",
"(",
"stats",
".",
"zscore",
"(",
"data",
")",
")",
"<",
"float",
"(",
"sd_val",
")",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"]",
"return",
"data"
] | Remove outliers from dataframe.
Note
----
1. This function excludes all lines with NA in all columns.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove outliers from.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to be considered an outlier)
Returns
-------
pd.DataFrame()
Dataframe with outliers removed. | [
"Remove",
"outliers",
"from",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L153-L175 | train | 234,945 |
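A standalone look at the z-score filter. With only five samples even a large spike sits roughly 2 SDs out, so a tighter threshold than the usual sd_val=3 is used here to show a row actually being dropped:

import numpy as np
import pandas as pd
from scipy import stats

df = pd.DataFrame({'kW': [10.0, 11.0, 9.5, 10.2, 95.0]})
kept_default = df[(np.abs(stats.zscore(df)) < 3.0).all(axis=1)]  # keeps all five rows
kept_tight = df[(np.abs(stats.zscore(df)) < 1.5).all(axis=1)]    # drops the 95.0 row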
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.remove_out_of_bounds | def remove_out_of_bounds(self, data, low_bound, high_bound):
""" Remove out of bound datapoints from dataframe.
This function removes all points <= low_bound and >= high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed.
"""
data = data.dropna()
data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)]
return data | python | def remove_out_of_bounds(self, data, low_bound, high_bound):
""" Remove out of bound datapoints from dataframe.
This function removes all points <= low_bound and >= high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed.
"""
data = data.dropna()
data = data[(data > low_bound).all(axis=1) & (data < high_bound).all(axis=1)]
return data | [
"def",
"remove_out_of_bounds",
"(",
"self",
",",
"data",
",",
"low_bound",
",",
"high_bound",
")",
":",
"data",
"=",
"data",
".",
"dropna",
"(",
")",
"data",
"=",
"data",
"[",
"(",
"data",
">",
"low_bound",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"&",
"(",
"data",
"<",
"high_bound",
")",
".",
"all",
"(",
"axis",
"=",
"1",
")",
"]",
"return",
"data"
] | Remove out of bound datapoints from dataframe.
This function removes all points <= low_bound and >= high_bound.
To Do,
1. Add a different boundary for each column.
Parameters
----------
data : pd.DataFrame()
Dataframe to remove bounds from.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
Returns
-------
pd.DataFrame()
Dataframe with out of bounds removed. | [
"Remove",
"out",
"of",
"bound",
"datapoints",
"from",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L178-L203 | train | 234,946 |
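A sketch of the bounds filter: a row survives only when every column is strictly inside (low_bound, high_bound), which is also how the 9998 default used elsewhere in this codebase screens out 9999-style error codes:

import pandas as pd

df = pd.DataFrame({'kW': [0.0, 42.0, 9999.0]})
kept = df.dropna()
kept = kept[(kept > 0).all(axis=1) & (kept < 9998).all(axis=1)]  # only the 42.0 row remains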
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data._set_TS_index | def _set_TS_index(self, data):
""" Convert index to datetime and all other columns to numeric
Parameters
----------
data : pd.DataFrame()
Input dataframe.
Returns
-------
pd.DataFrame()
Modified dataframe.
"""
# Set index
data.index = pd.to_datetime(data.index, errors="ignore")
# Format types to numeric
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors="coerce")
return data | python | def _set_TS_index(self, data):
""" Convert index to datetime and all other columns to numeric
Parameters
----------
data : pd.DataFrame()
Input dataframe.
Returns
-------
pd.DataFrame()
Modified dataframe.
"""
# Set index
data.index = pd.to_datetime(data.index, errors="ignore")
# Format types to numeric
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors="coerce")
return data | [
"def",
"_set_TS_index",
"(",
"self",
",",
"data",
")",
":",
"# Set index",
"data",
".",
"index",
"=",
"pd",
".",
"to_datetime",
"(",
"data",
".",
"index",
",",
"error",
"=",
"\"ignore\"",
")",
"# Format types to numeric",
"for",
"col",
"in",
"data",
".",
"columns",
":",
"data",
"[",
"col",
"]",
"=",
"pd",
".",
"to_numeric",
"(",
"data",
"[",
"col",
"]",
",",
"errors",
"=",
"\"coerce\"",
")",
"return",
"data"
] | Convert index to datetime and all other columns to numeric
Parameters
----------
data : pd.DataFrame()
Input dataframe.
Returns
-------
pd.DataFrame()
Modified dataframe. | [
"Convert",
"index",
"to",
"datetime",
"and",
"all",
"other",
"columns",
"to",
"numeric"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L283-L305 | train | 234,947 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data._utc_to_local | def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
""" Adjust index of dataframe according to timezone that is requested by user.
Parameters
----------
data : pd.DataFrame()
Pandas dataframe of json timeseries response from server.
local_zone : str
pytz.timezone string of specified local timezone to change index to.
Returns
-------
pd.DataFrame()
Pandas dataframe with timestamp index adjusted for local timezone.
"""
# Accounts for localtime shift
data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone)
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data | python | def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
""" Adjust index of dataframe according to timezone that is requested by user.
Parameters
----------
data : pd.DataFrame()
Pandas dataframe of json timeseries response from server.
local_zone : str
pytz.timezone string of specified local timezone to change index to.
Returns
-------
pd.DataFrame()
Pandas dataframe with timestamp index adjusted for local timezone.
"""
# Accounts for localtime shift
data.index = data.index.tz_localize(pytz.utc).tz_convert(local_zone)
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data | [
"def",
"_utc_to_local",
"(",
"self",
",",
"data",
",",
"local_zone",
"=",
"\"America/Los_Angeles\"",
")",
":",
"# Accounts for localtime shift",
"data",
".",
"index",
"=",
"data",
".",
"index",
".",
"tz_localize",
"(",
"pytz",
".",
"utc",
")",
".",
"tz_convert",
"(",
"local_zone",
")",
"# Gets rid of extra offset information so can compare with csv data",
"data",
".",
"index",
"=",
"data",
".",
"index",
".",
"tz_localize",
"(",
"None",
")",
"return",
"data"
] | Adjust index of dataframe according to timezone that is requested by user.
Parameters
----------
data : pd.DataFrame()
Pandas dataframe of json timeseries response from server.
local_zone : str
pytz.timezone string of specified local timezone to change index to.
Returns
-------
pd.DataFrame()
Pandas dataframe with timestamp index adjusted for local timezone. | [
"Adjust",
"index",
"of",
"dataframe",
"according",
"to",
"timezone",
"that",
"is",
"requested",
"by",
"user",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L308-L331 | train | 234,948 |
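The localize-then-convert-then-strip pattern in miniature, on a synthetic UTC-indexed frame:

import pandas as pd
import pytz

idx = pd.date_range('2018-01-01 08:00', periods=3, freq='h')  # naive stamps known to be UTC
df = pd.DataFrame({'v': [1, 2, 3]}, index=idx)
df.index = df.index.tz_localize(pytz.utc).tz_convert('America/Los_Angeles')
df.index = df.index.tz_localize(None)  # naive again, now reading 00:00 local time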
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data._local_to_utc | def _local_to_utc(self, timestamp, local_zone="America/Los_Angeles"):
""" Convert local timestamp to UTC.
Parameters
----------
timestamp : pd.DataFrame()
Input Pandas dataframe whose index needs to be changed.
local_zone : str
Name of local zone. Defaults to PST.
Returns
-------
pd.DataFrame()
Dataframe with UTC timestamps.
"""
timestamp_new = pd.to_datetime(timestamp, infer_datetime_format=True, errors='coerce')
timestamp_new = timestamp_new.tz_localize(local_zone).tz_convert(pytz.utc)
timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')
return timestamp_new | python | def _local_to_utc(self, timestamp, local_zone="America/Los_Angeles"):
""" Convert local timestamp to UTC.
Parameters
----------
timestamp : pd.DataFrame()
Input Pandas dataframe whose index needs to be changed.
local_zone : str
Name of local zone. Defaults to PST.
Returns
-------
pd.DataFrame()
Dataframe with UTC timestamps.
"""
timestamp_new = pd.to_datetime(timestamp, infer_datetime_format=True, errors='coerce')
timestamp_new = timestamp_new.tz_localize(local_zone).tz_convert(pytz.utc)
timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')
return timestamp_new | [
"def",
"_local_to_utc",
"(",
"self",
",",
"timestamp",
",",
"local_zone",
"=",
"\"America/Los_Angeles\"",
")",
":",
"timestamp_new",
"=",
"pd",
".",
"to_datetime",
"(",
"timestamp",
",",
"infer_datetime_format",
"=",
"True",
",",
"errors",
"=",
"'coerce'",
")",
"timestamp_new",
"=",
"timestamp_new",
".",
"tz_localize",
"(",
"local_zone",
")",
".",
"tz_convert",
"(",
"pytz",
".",
"utc",
")",
"timestamp_new",
"=",
"timestamp_new",
".",
"strftime",
"(",
"'%Y-%m-%d %H:%M:%S'",
")",
"return",
"timestamp_new"
] | Convert local timestamp to UTC.
Parameters
----------
timestamp : pd.DataFrame()
Input Pandas dataframe whose index needs to be changed.
local_zone : str
Name of local zone. Defaults to PST.
Returns
-------
pd.DataFrame()
Dataframe with UTC timestamps. | [
"Convert",
"local",
"timestamp",
"to",
"UTC",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L334-L354 | train | 234,949 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.find_uuid | def find_uuid(self, obj, column_name):
""" Find uuid.
Parameters
----------
obj : ???
the object returned by the MDAL Query
column_name : str
input point returned from MDAL Query
Returns
-------
str
the uuid that correlates with the data
"""
keys = obj.context.keys()
for i in keys:
if column_name in obj.context[i]['?point']:
uuid = i
return i | python | def find_uuid(self, obj, column_name):
""" Find uuid.
Parameters
----------
obj : ???
the object returned by the MDAL Query
column_name : str
input point returned from MDAL Query
Returns
-------
str
the uuid that correlates with the data
"""
keys = obj.context.keys()
for i in keys:
if column_name in obj.context[i]['?point']:
uuid = i
return i | [
"def",
"find_uuid",
"(",
"self",
",",
"obj",
",",
"column_name",
")",
":",
"keys",
"=",
"obj",
".",
"context",
".",
"keys",
"(",
")",
"for",
"i",
"in",
"keys",
":",
"if",
"column_name",
"in",
"obj",
".",
"context",
"[",
"i",
"]",
"[",
"'?point'",
"]",
":",
"uuid",
"=",
"i",
"return",
"i"
] | Find uuid.
Parameters
----------
obj : ???
the object returned by the MDAL Query
column_name : str
input point returned from MDAL Query
Returns
-------
str
the uuid that correlates with the data | [
"Find",
"uuid",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L952-L975 | train | 234,950 |
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.identify_missing | def identify_missing(self, df, check_start=True):
""" Identify missing data.
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data.
check_start : bool
resets a leading 1 to 0 for the first observation, so that the start of the data
is registered as the beginning of the missing data event
Returns
-------
pd.DataFrame(), str
dataframe where 1 indicates missing data and 0 indicates reported data,
returns the column name generated from the MDAL Query
"""
# check_start resets the first value of the df to 0 when the data stream is initially missing
# This allows the diff function to acknowledge the missing data
data_missing = df.isnull() * 1
col_name = str(data_missing.columns[0])
# When there is no data stream at the beginning we change the first flag to 0
if check_start & data_missing[col_name][0] == 1:
data_missing[col_name][0] = 0
return data_missing, col_name | python | def identify_missing(self, df, check_start=True):
""" Identify missing data.
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data.
check_start : bool
resets a leading 1 to 0 for the first observation, so that the start of the data
is registered as the beginning of the missing data event
Returns
-------
pd.DataFrame(), str
dataframe where 1 indicates missing data and 0 indicates reported data,
returns the column name generated from the MDAL Query
"""
# check_start resets the first value of the df to 0 when the data stream is initially missing
# This allows the diff function to acknowledge the missing data
data_missing = df.isnull() * 1
col_name = str(data_missing.columns[0])
# When there is no data stream at the beginning we change the first flag to 0
if check_start & data_missing[col_name][0] == 1:
data_missing[col_name][0] = 0
return data_missing, col_name | [
"def",
"identify_missing",
"(",
"self",
",",
"df",
",",
"check_start",
"=",
"True",
")",
":",
"# Check start changes the first value of df to 1, when the data stream is initially missing",
"# This allows the diff function to acknowledge the missing data",
"data_missing",
"=",
"df",
".",
"isnull",
"(",
")",
"*",
"1",
"col_name",
"=",
"str",
"(",
"data_missing",
".",
"columns",
"[",
"0",
"]",
")",
"# When there is no data stream at the beginning we change it to 1",
"if",
"check_start",
"&",
"data_missing",
"[",
"col_name",
"]",
"[",
"0",
"]",
"==",
"1",
":",
"data_missing",
"[",
"col_name",
"]",
"[",
"0",
"]",
"=",
"0",
"return",
"data_missing",
",",
"col_name"
] | Identify missing data.
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data.
check_start : bool
resets a leading 1 to 0 for the first observation, so that the start of the data
is registered as the beginning of the missing data event
Returns
-------
pd.DataFrame(), str
dataframe where 1 indicates missing data and 0 indicates reported data,
returns the column name generated from the MDAL Query | [
"Identify",
"missing",
"data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L978-L1006 | train | 234,951 |
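The isnull() * 1 trick in isolation: NaNs become 1s and reported values become 0s, which is exactly the boolean mask the gap-analysis methods below consume:

import numpy as np
import pandas as pd

df = pd.DataFrame({'meter': [1.0, np.nan, np.nan, 2.0]},
                  index=pd.date_range('2018-01-01', periods=4, freq='h'))
mask = df.isnull() * 1      # meter column becomes 0, 1, 1, 0
col = str(mask.columns[0])  # 'meter'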
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.diff_boolean | def diff_boolean(self, df, column_name=None, uuid=None, duration=True, min_event_filter='3 hours'):
""" takes the dataframe of missing values, and returns a dataframe that indicates the
length of each event where data was continuously missing
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data (must be in boolean format where 1 indicates missing data).
column_name : str
the original column name produced by MDAL Query
uuid : str
the uuid associated with the meter, if known
duration : bool
If True, the duration will be displayed in the results. If false the column will be dropped.
min_event_filter : str
Filters out the events that are less than the given time period
Returns
-------
pd.DataFrame()
dataframe with the start time of the event (as the index),
end time of the event (first time when data is reported)
"""
if uuid == None:
uuid = 'End'
data_gaps = df[(df.diff() == 1) | (df.diff() == -1)].dropna()
data_gaps["duration"] = abs(data_gaps.index.to_series().diff(periods=-1))
data_gaps[uuid] = data_gaps.index + (data_gaps["duration"])
data_gaps = data_gaps[data_gaps["duration"] > pd.Timedelta(min_event_filter)]
data_gaps = data_gaps[data_gaps[column_name] == 1]
data_gaps.pop(column_name)
if not duration:
data_gaps.pop('duration')
data_gaps.index = data_gaps.index.strftime(date_format="%Y-%m-%d %H:%M:%S")
data_gaps[uuid] = data_gaps[uuid].dt.strftime(date_format="%Y-%m-%d %H:%M:%S")
return data_gaps | python | def diff_boolean(self, df, column_name=None, uuid=None, duration=True, min_event_filter='3 hours'):
""" takes the dataframe of missing values, and returns a dataframe that indicates the
length of each event where data was continuously missing
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data (must be in boolean format where 1 indicates missing data).
column_name : str
the original column name produced by MDAL Query
uuid : str
the uuid associated with the meter, if known
duration : bool
If True, the duration will be displayed in the results. If false the column will be dropped.
min_event_filter : str
Filters out the events that are less than the given time period
Returns
-------
pd.DataFrame()
dataframe with the start time of the event (as the index),
end time of the event (first time when data is reported)
"""
if uuid == None:
uuid = 'End'
data_gaps = df[(df.diff() == 1) | (df.diff() == -1)].dropna()
data_gaps["duration"] = abs(data_gaps.index.to_series().diff(periods=-1))
data_gaps[uuid] = data_gaps.index + (data_gaps["duration"])
data_gaps = data_gaps[data_gaps["duration"] > pd.Timedelta(min_event_filter)]
data_gaps = data_gaps[data_gaps[column_name] == 1]
data_gaps.pop(column_name)
if not duration:
data_gaps.pop('duration')
data_gaps.index = data_gaps.index.strftime(date_format="%Y-%m-%d %H:%M:%S")
data_gaps[uuid] = data_gaps[uuid].dt.strftime(date_format="%Y-%m-%d %H:%M:%S")
return data_gaps | [
"def",
"diff_boolean",
"(",
"self",
",",
"df",
",",
"column_name",
"=",
"None",
",",
"uuid",
"=",
"None",
",",
"duration",
"=",
"True",
",",
"min_event_filter",
"=",
"'3 hours'",
")",
":",
"if",
"uuid",
"==",
"None",
":",
"uuid",
"=",
"'End'",
"data_gaps",
"=",
"df",
"[",
"(",
"df",
".",
"diff",
"(",
")",
"==",
"1",
")",
"|",
"(",
"df",
".",
"diff",
"(",
")",
"==",
"-",
"1",
")",
"]",
".",
"dropna",
"(",
")",
"data_gaps",
"[",
"\"duration\"",
"]",
"=",
"abs",
"(",
"data_gaps",
".",
"index",
".",
"to_series",
"(",
")",
".",
"diff",
"(",
"periods",
"=",
"-",
"1",
")",
")",
"data_gaps",
"[",
"uuid",
"]",
"=",
"data_gaps",
".",
"index",
"+",
"(",
"data_gaps",
"[",
"\"duration\"",
"]",
")",
"data_gaps",
"=",
"data_gaps",
"[",
"data_gaps",
"[",
"\"duration\"",
"]",
">",
"pd",
".",
"Timedelta",
"(",
"min_event_filter",
")",
"]",
"data_gaps",
"=",
"data_gaps",
"[",
"data_gaps",
"[",
"column_name",
"]",
"==",
"1",
"]",
"data_gaps",
".",
"pop",
"(",
"column_name",
")",
"if",
"not",
"duration",
":",
"data_gaps",
".",
"pop",
"(",
"'duration'",
")",
"data_gaps",
".",
"index",
"=",
"data_gaps",
".",
"index",
".",
"strftime",
"(",
"date_format",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"data_gaps",
"[",
"uuid",
"]",
"=",
"data_gaps",
"[",
"uuid",
"]",
".",
"dt",
".",
"strftime",
"(",
"date_format",
"=",
"\"%Y-%m-%d %H:%M:%S\"",
")",
"return",
"data_gaps"
] | takes the dataframe of missing values, and returns a dataframe that indicates the
length of each event where data was continuously missing
Parameters
----------
df : pd.DataFrame()
Dataframe to check for missing data (must be in boolean format where 1 indicates missing data).
column_name : str
the original column name produced by MDAL Query
uuid : str
the uuid associated with the meter, if known
duration : bool
If True, the duration will be displayed in the results. If false the column will be dropped.
min_event_filter : str
Filters out the events that are less than the given time period
Returns
-------
pd.DataFrame()
dataframe with the start time of the event (as the index),
end time of the event (first time when data is reported) | [
"takes",
"the",
"dataframe",
"of",
"missing",
"values",
"and",
"returns",
"a",
"dataframe",
"that",
"indicates",
"the",
"length",
"of",
"each",
"event",
"where",
"data",
"was",
"continuously",
"missing"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L1009-L1050 | train | 234,952 |
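The core of the gap extraction, stripped of filtering and formatting: diff() marks the edges of each missing-data run, and a negative-period diff of the index measures each run's length:

import pandas as pd

idx = pd.date_range('2018-01-01', periods=8, freq='h')
mask = pd.DataFrame({'meter': [0, 0, 1, 1, 1, 0, 0, 0]}, index=idx)
edges = mask[(mask.diff() == 1) | (mask.diff() == -1)].dropna()
edges['duration'] = abs(edges.index.to_series().diff(periods=-1))
events = edges[edges['meter'] == 1]  # one event starting 02:00 with a 3-hour duration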
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.analyze_quality_table | def analyze_quality_table(self, obj,low_bound=None, high_bound=None):
""" Takes in an the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df. Returns a df of data quality metrics
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query
low_bound : float
all data equal to or below this value will be interpreted as missing data
high_bound : float
all data above this value will be interpreted as missing
Returns
-------
pd.DataFrame()
returns data frame with % missing data, average duration of missing data
event and standard deviation of that duration for each column of data
"""
data = obj.df
N_rows = 3
N_cols = data.shape[1]
d = pd.DataFrame(np.zeros((N_rows, N_cols)),
index=['% Missing', 'AVG Length Missing', 'Std dev. Missing'],
columns=[data.columns])
if low_bound:
data = data.where(data >= low_bound)
if high_bound:
data=data.where(data < high_bound)
for i in range(N_cols):
data_per_meter = data.iloc[:, [i]]
data_missing, meter = self.identify_missing(data_per_meter)
percentage = data_missing.sum() / (data.shape[0]) * 100
data_gaps = self.diff_boolean(data_missing, column_name=meter)
missing_mean = data_gaps.mean()
std_dev = data_gaps.std()
d.loc["% Missing", meter] = percentage[meter]
d.loc["AVG Length Missing", meter] = missing_mean['duration']
d.loc["Std dev. Missing", meter] = std_dev['duration']
return d | python | def analyze_quality_table(self, obj,low_bound=None, high_bound=None):
""" Takes in an the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df. Returns a df of data quality metrics
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query
low_bound : float
all data equal to or below this value will be interpreted as missing data
high_bound : float
all data above this value will be interpreted as missing
Returns
-------
pd.DataFrame()
returns data frame with % missing data, average duration of missing data
event and standard deviation of that duration for each column of data
"""
data = obj.df
N_rows = 3
N_cols = data.shape[1]
d = pd.DataFrame(np.zeros((N_rows, N_cols)),
index=['% Missing', 'AVG Length Missing', 'Std dev. Missing'],
columns=[data.columns])
if low_bound:
data = data.where(data >= low_bound)
if high_bound:
data=data.where(data < high_bound)
for i in range(N_cols):
data_per_meter = data.iloc[:, [i]]
data_missing, meter = self.identify_missing(data_per_meter)
percentage = data_missing.sum() / (data.shape[0]) * 100
data_gaps = self.diff_boolean(data_missing, column_name=meter)
missing_mean = data_gaps.mean()
std_dev = data_gaps.std()
d.loc["% Missing", meter] = percentage[meter]
d.loc["AVG Length Missing", meter] = missing_mean['duration']
d.loc["Std dev. Missing", meter] = std_dev['duration']
return d | [
"def",
"analyze_quality_table",
"(",
"self",
",",
"obj",
",",
"low_bound",
"=",
"None",
",",
"high_bound",
"=",
"None",
")",
":",
"data",
"=",
"obj",
".",
"df",
"N_rows",
"=",
"3",
"N_cols",
"=",
"data",
".",
"shape",
"[",
"1",
"]",
"d",
"=",
"pd",
".",
"DataFrame",
"(",
"np",
".",
"zeros",
"(",
"(",
"N_rows",
",",
"N_cols",
")",
")",
",",
"index",
"=",
"[",
"'% Missing'",
",",
"'AVG Length Missing'",
",",
"'Std dev. Missing'",
"]",
",",
"columns",
"=",
"[",
"data",
".",
"columns",
"]",
")",
"if",
"low_bound",
":",
"data",
"=",
"data",
".",
"where",
"(",
"data",
">=",
"low_bound",
")",
"if",
"high_bound",
":",
"data",
"=",
"data",
".",
"where",
"(",
"data",
"<",
"high_bound",
")",
"for",
"i",
"in",
"range",
"(",
"N_cols",
")",
":",
"data_per_meter",
"=",
"data",
".",
"iloc",
"[",
":",
",",
"[",
"i",
"]",
"]",
"data_missing",
",",
"meter",
"=",
"self",
".",
"identify_missing",
"(",
"data_per_meter",
")",
"percentage",
"=",
"data_missing",
".",
"sum",
"(",
")",
"/",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"100",
"data_gaps",
"=",
"self",
".",
"diff_boolean",
"(",
"data_missing",
",",
"column_name",
"=",
"meter",
")",
"missing_mean",
"=",
"data_gaps",
".",
"mean",
"(",
")",
"std_dev",
"=",
"data_gaps",
".",
"std",
"(",
")",
"d",
".",
"loc",
"[",
"\"% Missing\"",
",",
"meter",
"]",
"=",
"percentage",
"[",
"meter",
"]",
"d",
".",
"loc",
"[",
"\"AVG Length Missing\"",
",",
"meter",
"]",
"=",
"missing_mean",
"[",
"'duration'",
"]",
"d",
".",
"loc",
"[",
"\"Std dev. Missing\"",
",",
"meter",
"]",
"=",
"std_dev",
"[",
"'duration'",
"]",
"return",
"d"
] | Takes in the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df. Returns a df of data quality metrics
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query
low_bound : float
all data equal to or below this value will be interpreted as missing data
high_bound : float
all data above this value will be interpreted as missing
Returns
-------
pd.DataFrame()
returns data frame with % missing data, average duration of missing data
event and standard deviation of that duration for each column of data | [
"Takes",
"in",
"an",
"the",
"object",
"returned",
"by",
"the",
"MDAL",
"query",
"and",
"analyzes",
"the",
"quality",
"of",
"the",
"data",
"for",
"each",
"column",
"in",
"the",
"df",
".",
"Returns",
"a",
"df",
"of",
"data",
"quality",
"metrics"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L1053-L1108 | train | 234,953 |
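A hypothetical end-to-end call, reusing the assumed imp MDAL client from the earlier sketches; the Clean_Data constructor signature is likewise an assumption:

cd = Clean_Data()
resp = imp.get_meter(site='some-site',
                     start='2018-01-01T00:00:00-08:00',
                     end='2018-02-01T00:00:00-08:00')
summary = cd.analyze_quality_table(resp, low_bound=0, high_bound=9998)
print(summary)  # % Missing / AVG Length Missing / Std dev. Missing per meter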
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Clean_Data.py | Clean_Data.analyze_quality_graph | def analyze_quality_graph(self, obj):
""" Takes in an the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df in the form of graphs. The Graphs returned
show missing data events over time, and missing data frequency during each hour
of the day
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query
"""
data = obj.df
for i in range(data.shape[1]):
data_per_meter = data.iloc[:, [i]] # need to make this work or change the structure
data_missing, meter = self.identify_missing(data_per_meter)
percentage = data_missing.sum() / (data.shape[0]) * 100
print('Percentage Missing of ' + meter + ' data: ' + str(int(percentage)) + '%')
data_missing.plot(figsize=(18, 5), x_compat=True, title=meter + " Missing Data over the Time interval")
data_gaps = self.diff_boolean(data_missing, column_name=meter)
data_missing['Hour'] = data_missing.index.hour
ymax = int(data_missing.groupby('Hour').sum().max() + 10)
data_missing.groupby('Hour').sum().plot(ylim=(0, ymax), figsize=(18, 5),
title=meter + " Time of Day of Missing Data")
print(data_gaps) | python | def analyze_quality_graph(self, obj):
""" Takes in an the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df in the form of graphs. The Graphs returned
show missing data events over time, and missing data frequency during each hour
of the day
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query
"""
data = obj.df
for i in range(data.shape[1]):
data_per_meter = data.iloc[:, [i]] # need to make this work or change the structure
data_missing, meter = self.identify_missing(data_per_meter)
percentage = data_missing.sum() / (data.shape[0]) * 100
print('Percentage Missing of ' + meter + ' data: ' + str(int(percentage)) + '%')
data_missing.plot(figsize=(18, 5), x_compat=True, title=meter + " Missing Data over the Time interval")
data_gaps = self.diff_boolean(data_missing, column_name=meter)
data_missing['Hour'] = data_missing.index.hour
ymax = int(data_missing.groupby('Hour').sum().max() + 10)
data_missing.groupby('Hour').sum().plot(ylim=(0, ymax), figsize=(18, 5),
title=meter + " Time of Day of Missing Data")
print(data_gaps) | [
"def",
"analyze_quality_graph",
"(",
"self",
",",
"obj",
")",
":",
"data",
"=",
"obj",
".",
"df",
"for",
"i",
"in",
"range",
"(",
"data",
".",
"shape",
"[",
"1",
"]",
")",
":",
"data_per_meter",
"=",
"data",
".",
"iloc",
"[",
":",
",",
"[",
"i",
"]",
"]",
"# need to make this work or change the structure",
"data_missing",
",",
"meter",
"=",
"self",
".",
"identify_missing",
"(",
"data_per_meter",
")",
"percentage",
"=",
"data_missing",
".",
"sum",
"(",
")",
"/",
"(",
"data",
".",
"shape",
"[",
"0",
"]",
")",
"*",
"100",
"print",
"(",
"'Percentage Missing of '",
"+",
"meter",
"+",
"' data: '",
"+",
"str",
"(",
"int",
"(",
"percentage",
")",
")",
"+",
"'%'",
")",
"data_missing",
".",
"plot",
"(",
"figsize",
"=",
"(",
"18",
",",
"5",
")",
",",
"x_compat",
"=",
"True",
",",
"title",
"=",
"meter",
"+",
"\" Missing Data over the Time interval\"",
")",
"data_gaps",
"=",
"self",
".",
"diff_boolean",
"(",
"data_missing",
",",
"column_name",
"=",
"meter",
")",
"data_missing",
"[",
"'Hour'",
"]",
"=",
"data_missing",
".",
"index",
".",
"hour",
"ymax",
"=",
"int",
"(",
"data_missing",
".",
"groupby",
"(",
"'Hour'",
")",
".",
"sum",
"(",
")",
".",
"max",
"(",
")",
"+",
"10",
")",
"data_missing",
".",
"groupby",
"(",
"'Hour'",
")",
".",
"sum",
"(",
")",
".",
"plot",
"(",
"ylim",
"=",
"(",
"0",
",",
"ymax",
")",
",",
"figsize",
"=",
"(",
"18",
",",
"5",
")",
",",
"title",
"=",
"meter",
"+",
"\" Time of Day of Missing Data\"",
")",
"print",
"(",
"data_gaps",
")"
] | Takes in the object returned by the MDAL query, and analyzes the quality
of the data for each column in the df in the form of graphs. The Graphs returned
show missing data events over time, and missing data frequency during each hour
of the day
To Do
-----
Need to make it specific for varying meters and label it for each type,
Either separate functions or make the function broader
Parameters
----------
obj : ???
the object returned by the MDAL Query | [
"Takes",
"in",
"an",
"the",
"object",
"returned",
"by",
"the",
"MDAL",
"query",
"and",
"analyzes",
"the",
"quality",
"of",
"the",
"data",
"for",
"each",
"column",
"in",
"the",
"df",
"in",
"the",
"form",
"of",
"graphs",
".",
"The",
"Graphs",
"returned",
"show",
"missing",
"data",
"events",
"over",
"time",
"and",
"missing",
"data",
"frequency",
"during",
"each",
"hour",
"of",
"the",
"day"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Clean_Data.py#L1111-L1147 | train | 234,954 |
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Clean_Data.py | Clean_Data.clean_data | def clean_data(self, resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=9998):
""" Clean dataframe.
Parameters
----------
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to be considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
"""
# Start from the original data
data = self.original_data
if resample:
try:
data = self.resample_data(data, freq, resampler)
except Exception as e:
raise e
if interpolate:
try:
data = self.interpolate_data(data, limit=limit, method=method)
except Exception as e:
raise e
if remove_na:
try:
data = self.remove_na(data, remove_na_how)
except Exception as e:
raise e
if remove_outliers:
try:
data = self.remove_outliers(data, sd_val)
except Exception as e:
raise e
if remove_out_of_bounds:
try:
data = self.remove_out_of_bounds(data, low_bound, high_bound)
except Exception as e:
raise e
self.cleaned_data = data | python | def clean_data(self, resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=9998):
""" Clean dataframe.
Parameters
----------
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency i.e. d, h, 15T...
resampler : str
Resampling type i.e. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA i.e. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to be considered an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
"""
# Start from the original data
data = self.original_data
if resample:
try:
data = self.resample_data(data, freq, resampler)
except Exception as e:
raise e
if interpolate:
try:
data = self.interpolate_data(data, limit=limit, method=method)
except Exception as e:
raise e
if remove_na:
try:
data = self.remove_na(data, remove_na_how)
except Exception as e:
raise e
if remove_outliers:
try:
data = self.remove_outliers(data, sd_val)
except Exception as e:
raise e
if remove_out_of_bounds:
try:
data = self.remove_out_of_bounds(data, low_bound, high_bound)
except Exception as e:
raise e
self.cleaned_data = data | [
"def",
"clean_data",
"(",
"self",
",",
"resample",
"=",
"True",
",",
"freq",
"=",
"'h'",
",",
"resampler",
"=",
"'mean'",
",",
"interpolate",
"=",
"True",
",",
"limit",
"=",
"1",
",",
"method",
"=",
"'linear'",
",",
"remove_na",
"=",
"True",
",",
"remove_na_how",
"=",
"'any'",
",",
"remove_outliers",
"=",
"True",
",",
"sd_val",
"=",
"3",
",",
"remove_out_of_bounds",
"=",
"True",
",",
"low_bound",
"=",
"0",
",",
"high_bound",
"=",
"9998",
")",
":",
"# Store copy of the original data",
"data",
"=",
"self",
".",
"original_data",
"if",
"resample",
":",
"try",
":",
"data",
"=",
"self",
".",
"resample_data",
"(",
"data",
",",
"freq",
",",
"resampler",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"interpolate",
":",
"try",
":",
"data",
"=",
"self",
".",
"interpolate_data",
"(",
"data",
",",
"limit",
"=",
"limit",
",",
"method",
"=",
"method",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"remove_na",
":",
"try",
":",
"data",
"=",
"self",
".",
"remove_na",
"(",
"data",
",",
"remove_na_how",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"remove_outliers",
":",
"try",
":",
"data",
"=",
"self",
".",
"remove_outliers",
"(",
"data",
",",
"sd_val",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"if",
"remove_out_of_bounds",
":",
"try",
":",
"data",
"=",
"self",
".",
"remove_out_of_bounds",
"(",
"data",
",",
"low_bound",
",",
"high_bound",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"e",
"self",
".",
"cleaned_data",
"=",
"data"
] | Clean dataframe.
Parameters
----------
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency, e.g. d, h, 15T...
resampler : str
Resampling type, e.g. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA, e.g. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to count as an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data. | [
"Clean",
"dataframe",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Clean_Data.py#L198-L269 | train | 234,955 |
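A minimal usage sketch for the cleaning pipeline in the record above. The import path, constructor behavior, and column name are assumptions for illustration, not part of the source; the flag values mirror the documented parameters.

import pandas as pd
from Clean_Data import Clean_Data  # assumed import path

# Hypothetical 15-minute meter feed with a gap and an out-of-range spike.
raw = pd.DataFrame({'power': [1.2, None, 1.4, 9999.0]},
                   index=pd.date_range('2018-01-01', periods=4, freq='15T'))
cleaner = Clean_Data(raw)          # assumed: the constructor stores raw as original_data
cleaner.clean_data(freq='h', resampler='mean', sd_val=3,
                   low_bound=0, high_bound=9998)
print(cleaner.cleaned_data)        # hourly, interpolated, outlier and bound filters applied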
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.write_json | def write_json(self):
""" Dump data into json file. """
with open(self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json', 'a') as f:
json.dump(self.result, f) | python | def write_json(self):
""" Dump data into json file. """
with open(self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json', 'a') as f:
json.dump(self.result, f) | [
"def",
"write_json",
"(",
"self",
")",
":",
"with",
"open",
"(",
"self",
".",
"results_folder_name",
"+",
"'/results-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.json'",
",",
"'a'",
")",
"as",
"f",
":",
"json",
".",
"dump",
"(",
"self",
".",
"result",
",",
"f",
")"
] | Dump data into json file. | [
"Dump",
"data",
"into",
"json",
"file",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L143-L147 | train | 234,956 |
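write_json above appends self.result to a counter-named file; a hedged sketch of reading one back, where wrapper stands in for an already-constructed instance. Note the 'a' open mode in the record: calling write_json twice with the same counter appends two JSON objects to one file, which json.load can no longer parse as a single document.

import json

# Path pattern built by write_json: <results_folder_name>/results-<global_count>.json
path = wrapper.results_folder_name + '/results-' + str(wrapper.get_global_count()) + '.json'
with open(path) as f:
    result = json.load(f)   # the dict accumulated in wrapper.result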
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.site_analysis | def site_analysis(self, folder_name, site_install_mapping, end_date):
""" Summarize site data into a single table.
folder_name : str
Folder where all site data resides.
site_install_mapping : dict
Dictionary of site name to date of installation.
end_date : str
End date of data collected.
"""
def count_number_of_days(site, end_date):
""" Counts the number of days between two dates.
Parameters
----------
site : str
Key to a dic containing site_name -> pelican installation date.
end_date : str
End date.
Returns
-------
int
Number of days
"""
start_date = site_install_mapping[site]
start_date = start_date.split('-')
start = date(int(start_date[0]), int(start_date[1]), int(start_date[2]))
end_date = end_date.split('-')
end = date(int(end_date[0]), int(end_date[1]), int(end_date[2]))
delta = end - start
return delta.days
if not folder_name or not isinstance(folder_name, str):
raise TypeError("folder_name should be type string")
else:
list_json_files = []
df = pd.DataFrame()
temp_df = pd.DataFrame()
json_files = [f for f in os.listdir(folder_name) if f.endswith('.json')]
for json_file in json_files:
with open(folder_name + json_file) as f:
js = json.load(f)
num_days = count_number_of_days(js['Site'], end_date)
e_abs_sav = round(js['Energy Savings (absolute)'] / 1000, 2) # Energy Absolute Savings
e_perc_sav = round(js['Energy Savings (%)'], 2) # Energy Percent Savings
ann_e_abs_sav = (e_abs_sav / num_days) * 365 # Annualized Energy Absolute Savings
d_abs_sav = round(js['User Comments']['Dollar Savings (absolute)'], 2) # Dollar Absolute Savings
d_perc_sav = round(js['User Comments']['Dollar Savings (%)'], 2) # Dollar Percent Savings
ann_d_abs_sav = (d_abs_sav / num_days) * 365 # Annualized Dollar Absolute Savings
temp_df = pd.DataFrame({
'Site': js['Site'],
'#Days since Pelican Installation': num_days,
'Energy Savings (%)': e_perc_sav,
'Energy Savings (kWh)': e_abs_sav,
'Annualized Energy Savings (kWh)': ann_e_abs_sav,
'Dollar Savings (%)': d_perc_sav,
'Dollar Savings ($)': d_abs_sav,
'Annualized Dollar Savings ($)': ann_d_abs_sav,
'Best Model': js['Model']['Optimal Model\'s Metrics']['name'],
'Adj R2': round(js['Model']['Optimal Model\'s Metrics']['adj_cross_val_score'], 2),
'RMSE': round(js['Model']['Optimal Model\'s Metrics']['rmse'], 2),
'MAPE': js['Model']['Optimal Model\'s Metrics']['mape'],
'Uncertainity': js['Uncertainity'],
}, index=[0])
df = df.append(temp_df)
df.set_index('Site', inplace=True)
return df | python | def site_analysis(self, folder_name, site_install_mapping, end_date):
""" Summarize site data into a single table.
folder_name : str
Folder where all site data resides.
site_install_mapping : dict
Dictionary of site name to date of installation.
end_date : str
End date of data collected.
"""
def count_number_of_days(site, end_date):
""" Counts the number of days between two dates.
Parameters
----------
site : str
Key to a dic containing site_name -> pelican installation date.
end_date : str
End date.
Returns
-------
int
Number of days
"""
start_date = site_install_mapping[site]
start_date = start_date.split('-')
start = date(int(start_date[0]), int(start_date[1]), int(start_date[2]))
end_date = end_date.split('-')
end = date(int(end_date[0]), int(end_date[1]), int(end_date[2]))
delta = end - start
return delta.days
if not folder_name or not isinstance(folder_name, str):
raise TypeError("folder_name should be type string")
else:
list_json_files = []
df = pd.DataFrame()
temp_df = pd.DataFrame()
json_files = [f for f in os.listdir(folder_name) if f.endswith('.json')]
for json_file in json_files:
with open(folder_name + json_file) as f:
js = json.load(f)
num_days = count_number_of_days(js['Site'], end_date)
e_abs_sav = round(js['Energy Savings (absolute)'] / 1000, 2) # Energy Absolute Savings
e_perc_sav = round(js['Energy Savings (%)'], 2) # Energy Percent Savings
ann_e_abs_sav = (e_abs_sav / num_days) * 365 # Annualized Energy Absolute Savings
d_abs_sav = round(js['User Comments']['Dollar Savings (absolute)'], 2) # Dollar Absolute Savings
d_perc_sav = round(js['User Comments']['Dollar Savings (%)'], 2) # Dollar Percent Savings
ann_d_abs_sav = (d_abs_sav / num_days) * 365 # Annualized Dollar Absolute Savings
temp_df = pd.DataFrame({
'Site': js['Site'],
'#Days since Pelican Installation': num_days,
'Energy Savings (%)': e_perc_sav,
'Energy Savings (kWh)': e_abs_sav,
'Annualized Energy Savings (kWh)': ann_e_abs_sav,
'Dollar Savings (%)': d_perc_sav,
'Dollar Savings ($)': d_abs_sav,
'Annualized Dollar Savings ($)': ann_d_abs_sav,
'Best Model': js['Model']['Optimal Model\'s Metrics']['name'],
'Adj R2': round(js['Model']['Optimal Model\'s Metrics']['adj_cross_val_score'], 2),
'RMSE': round(js['Model']['Optimal Model\'s Metrics']['rmse'], 2),
'MAPE': js['Model']['Optimal Model\'s Metrics']['mape'],
'Uncertainity': js['Uncertainity'],
}, index=[0])
df = df.append(temp_df)
df.set_index('Site', inplace=True)
return df | [
"def",
"site_analysis",
"(",
"self",
",",
"folder_name",
",",
"site_install_mapping",
",",
"end_date",
")",
":",
"def",
"count_number_of_days",
"(",
"site",
",",
"end_date",
")",
":",
"\"\"\" Counts the number of days between two dates.\n\n Parameters\n ----------\n site : str\n Key to a dic containing site_name -> pelican installation date.\n end_date : str\n End date.\n\n Returns\n -------\n int\n Number of days\n\n \"\"\"",
"start_date",
"=",
"site_install_mapping",
"[",
"site",
"]",
"start_date",
"=",
"start_date",
".",
"split",
"(",
"'-'",
")",
"start",
"=",
"date",
"(",
"int",
"(",
"start_date",
"[",
"0",
"]",
")",
",",
"int",
"(",
"start_date",
"[",
"1",
"]",
")",
",",
"int",
"(",
"start_date",
"[",
"2",
"]",
")",
")",
"end_date",
"=",
"end_date",
".",
"split",
"(",
"'-'",
")",
"end",
"=",
"date",
"(",
"int",
"(",
"end_date",
"[",
"0",
"]",
")",
",",
"int",
"(",
"end_date",
"[",
"1",
"]",
")",
",",
"int",
"(",
"end_date",
"[",
"2",
"]",
")",
")",
"delta",
"=",
"end",
"-",
"start",
"return",
"delta",
".",
"days",
"if",
"not",
"folder_name",
"or",
"not",
"isinstance",
"(",
"folder_name",
",",
"str",
")",
":",
"raise",
"TypeError",
"(",
"\"folder_name should be type string\"",
")",
"else",
":",
"list_json_files",
"=",
"[",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"temp_df",
"=",
"pd",
".",
"DataFrame",
"(",
")",
"json_files",
"=",
"[",
"f",
"for",
"f",
"in",
"os",
".",
"listdir",
"(",
"folder_name",
")",
"if",
"f",
".",
"endswith",
"(",
"'.json'",
")",
"]",
"for",
"json_file",
"in",
"json_files",
":",
"with",
"open",
"(",
"folder_name",
"+",
"json_file",
")",
"as",
"f",
":",
"js",
"=",
"json",
".",
"load",
"(",
"f",
")",
"num_days",
"=",
"count_number_of_days",
"(",
"js",
"[",
"'Site'",
"]",
",",
"end_date",
")",
"e_abs_sav",
"=",
"round",
"(",
"js",
"[",
"'Energy Savings (absolute)'",
"]",
"/",
"1000",
",",
"2",
")",
"# Energy Absolute Savings",
"e_perc_sav",
"=",
"round",
"(",
"js",
"[",
"'Energy Savings (%)'",
"]",
",",
"2",
")",
"# Energy Percent Savings",
"ann_e_abs_sav",
"=",
"(",
"e_abs_sav",
"/",
"num_days",
")",
"*",
"365",
"# Annualized Energy Absolute Savings",
"d_abs_sav",
"=",
"round",
"(",
"js",
"[",
"'User Comments'",
"]",
"[",
"'Dollar Savings (absolute)'",
"]",
",",
"2",
")",
"# Dollar Absolute Savings",
"d_perc_sav",
"=",
"round",
"(",
"js",
"[",
"'User Comments'",
"]",
"[",
"'Dollar Savings (%)'",
"]",
",",
"2",
")",
"# Dollar Percent Savings",
"ann_d_abs_sav",
"=",
"(",
"d_abs_sav",
"/",
"num_days",
")",
"*",
"365",
"# Annualized Dollar Absolute Savings",
"temp_df",
"=",
"pd",
".",
"DataFrame",
"(",
"{",
"'Site'",
":",
"js",
"[",
"'Site'",
"]",
",",
"'#Days since Pelican Installation'",
":",
"num_days",
",",
"'Energy Savings (%)'",
":",
"e_perc_sav",
",",
"'Energy Savings (kWh)'",
":",
"e_abs_sav",
",",
"'Annualized Energy Savings (kWh)'",
":",
"ann_e_abs_sav",
",",
"'Dollar Savings (%)'",
":",
"d_perc_sav",
",",
"'Dollar Savings ($)'",
":",
"d_abs_sav",
",",
"'Annualized Dollar Savings ($)'",
":",
"ann_d_abs_sav",
",",
"'Best Model'",
":",
"js",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'name'",
"]",
",",
"'Adj R2'",
":",
"round",
"(",
"js",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'adj_cross_val_score'",
"]",
",",
"2",
")",
",",
"'RMSE'",
":",
"round",
"(",
"js",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'rmse'",
"]",
",",
"2",
")",
",",
"'MAPE'",
":",
"js",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'mape'",
"]",
",",
"'Uncertainity'",
":",
"js",
"[",
"'Uncertainity'",
"]",
",",
"}",
",",
"index",
"=",
"[",
"0",
"]",
")",
"df",
"=",
"df",
".",
"append",
"(",
"temp_df",
")",
"df",
".",
"set_index",
"(",
"'Site'",
",",
"inplace",
"=",
"True",
")",
"return",
"df"
] | Summarize site data into a single table.
folder_name : str
Folder where all site data resides.
site_install_mapping : dict
Dictionary of site name to date of installation.
end_date : str
End date of data collected. | [
"Summarize",
"site",
"data",
"into",
"a",
"single",
"table",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L150-L235 | train | 234,957 |
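A hypothetical call to the summarizer above. folder_name is concatenated directly with each file name, so the trailing slash matters; the site name and dates are placeholders, and the column labels (including the 'Uncertainity' spelling) come from the code.

mapping = {'site_a': '2018-06-01'}   # install dates keyed by each file's 'Site' field
summary = wrapper.site_analysis('results/', mapping, end_date='2019-01-15')
print(summary[['Energy Savings (%)', 'Best Model', 'Uncertainity']])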
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.search | def search(self, file_name, imported_data=None):
""" Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Json file to read parameters from.
imported_data : pd.DataFrame()
Pandas Dataframe containing data; if provided, the CSV import step is skipped.
"""
resample_freq=['15T', 'h', 'd']
time_freq = {
'year' : [True, False, False, False, False],
'month' : [False, True, False, False, False],
'week' : [False, False, True, False, False],
'tod' : [False, False, False, True, False],
'dow' : [False, False, False, False, True],
}
optimal_score = float('-inf')
optimal_model = None
# CSV Files
if not imported_data:
with open(file_name) as f:
input_json = json.load(f)
import_json = input_json['Import']
imported_data = self.import_data(file_name=import_json['File Name'], folder_name=import_json['Folder Name'],
head_row=import_json['Head Row'], index_col=import_json['Index Col'],
convert_col=import_json['Convert Col'], concat_files=import_json['Concat Files'],
save_file=import_json['Save File'])
with open(file_name) as f:
input_json = json.load(f)
for x in resample_freq: # Resample data interval
input_json['Clean']['Frequency'] = x
for i in range(len(time_freq.items())): # Add time features
input_json['Preprocess']['Year'] = time_freq['year'][i]
input_json['Preprocess']['Month'] = time_freq['month'][i]
input_json['Preprocess']['Week'] = time_freq['week'][i]
input_json['Preprocess']['Time of Day'] = time_freq['tod'][i]
input_json['Preprocess']['Day of Week'] = time_freq['dow'][i]
# Putting comment in json file to indicate which parameters have been changed
time_feature = None
for key in time_freq:
if time_freq[key][i]:
time_feature = key
self.result['Comment'] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature
# Read parameters in input_json
self.read_json(file_name=None, input_json=input_json, imported_data=imported_data)
# Keep track of highest adj_r2 score
if self.result['Model']['Optimal Model\'s Metrics']['adj_r2'] > optimal_score:
optimal_score = self.result['Model']['Optimal Model\'s Metrics']['adj_r2']
optimal_model_file_name = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
# Wrapper.global_count += 1
print('Most optimal model: ', optimal_model_file_name)
freq = self.result['Comment'].split(' ')[1][:-1]
time_feat = self.result['Comment'].split(' ')[-1]
print('Freq: ', freq, 'Time Feature: ', time_feat) | python | def search(self, file_name, imported_data=None):
""" Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Json file to read parameters from.
imported_data : pd.DataFrame()
Pandas Dataframe containing data; if provided, the CSV import step is skipped.
"""
resample_freq=['15T', 'h', 'd']
time_freq = {
'year' : [True, False, False, False, False],
'month' : [False, True, False, False, False],
'week' : [False, False, True, False, False],
'tod' : [False, False, False, True, False],
'dow' : [False, False, False, False, True],
}
optimal_score = float('-inf')
optimal_model = None
# CSV Files
if not imported_data:
with open(file_name) as f:
input_json = json.load(f)
import_json = input_json['Import']
imported_data = self.import_data(file_name=import_json['File Name'], folder_name=import_json['Folder Name'],
head_row=import_json['Head Row'], index_col=import_json['Index Col'],
convert_col=import_json['Convert Col'], concat_files=import_json['Concat Files'],
save_file=import_json['Save File'])
with open(file_name) as f:
input_json = json.load(f)
for x in resample_freq: # Resample data interval
input_json['Clean']['Frequency'] = x
for i in range(len(time_freq.items())): # Add time features
input_json['Preprocess']['Year'] = time_freq['year'][i]
input_json['Preprocess']['Month'] = time_freq['month'][i]
input_json['Preprocess']['Week'] = time_freq['week'][i]
input_json['Preprocess']['Time of Day'] = time_freq['tod'][i]
input_json['Preprocess']['Day of Week'] = time_freq['dow'][i]
# Putting comment in json file to indicate which parameters have been changed
time_feature = None
for key in time_freq:
if time_freq[key][i]:
time_feature = key
self.result['Comment'] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature
# Read parameters in input_json
self.read_json(file_name=None, input_json=input_json, imported_data=imported_data)
# Keep track of highest adj_r2 score
if self.result['Model']['Optimal Model\'s Metrics']['adj_r2'] > optimal_score:
optimal_score = self.result['Model']['Optimal Model\'s Metrics']['adj_r2']
optimal_model_file_name = self.results_folder_name + '/results-' + str(self.get_global_count()) + '.json'
# Wrapper.global_count += 1
print('Most optimal model: ', optimal_model_file_name)
freq = self.result['Comment'].split(' ')[1][:-1]
time_feat = self.result['Comment'].split(' ')[-1]
print('Freq: ', freq, 'Time Feature: ', time_feat) | [
"def",
"search",
"(",
"self",
",",
"file_name",
",",
"imported_data",
"=",
"None",
")",
":",
"resample_freq",
"=",
"[",
"'15T'",
",",
"'h'",
",",
"'d'",
"]",
"time_freq",
"=",
"{",
"'year'",
":",
"[",
"True",
",",
"False",
",",
"False",
",",
"False",
",",
"False",
"]",
",",
"'month'",
":",
"[",
"False",
",",
"True",
",",
"False",
",",
"False",
",",
"False",
"]",
",",
"'week'",
":",
"[",
"False",
",",
"False",
",",
"True",
",",
"False",
",",
"False",
"]",
",",
"'tod'",
":",
"[",
"False",
",",
"False",
",",
"False",
",",
"True",
",",
"False",
"]",
",",
"'dow'",
":",
"[",
"False",
",",
"False",
",",
"False",
",",
"False",
",",
"True",
"]",
",",
"}",
"optimal_score",
"=",
"float",
"(",
"'-inf'",
")",
"optimal_model",
"=",
"None",
"# CSV Files",
"if",
"not",
"imported_data",
":",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"input_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"import_json",
"=",
"input_json",
"[",
"'Import'",
"]",
"imported_data",
"=",
"self",
".",
"import_data",
"(",
"file_name",
"=",
"import_json",
"[",
"'File Name'",
"]",
",",
"folder_name",
"=",
"import_json",
"[",
"'Folder Name'",
"]",
",",
"head_row",
"=",
"import_json",
"[",
"'Head Row'",
"]",
",",
"index_col",
"=",
"import_json",
"[",
"'Index Col'",
"]",
",",
"convert_col",
"=",
"import_json",
"[",
"'Convert Col'",
"]",
",",
"concat_files",
"=",
"import_json",
"[",
"'Concat Files'",
"]",
",",
"save_file",
"=",
"import_json",
"[",
"'Save File'",
"]",
")",
"with",
"open",
"(",
"file_name",
")",
"as",
"f",
":",
"input_json",
"=",
"json",
".",
"load",
"(",
"f",
")",
"for",
"x",
"in",
"resample_freq",
":",
"# Resample data interval",
"input_json",
"[",
"'Clean'",
"]",
"[",
"'Frequency'",
"]",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"time_freq",
".",
"items",
"(",
")",
")",
")",
":",
"# Add time features",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Year'",
"]",
"=",
"time_freq",
"[",
"'year'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Month'",
"]",
"=",
"time_freq",
"[",
"'month'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Week'",
"]",
"=",
"time_freq",
"[",
"'week'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Time of Day'",
"]",
"=",
"time_freq",
"[",
"'tod'",
"]",
"[",
"i",
"]",
"input_json",
"[",
"'Preprocess'",
"]",
"[",
"'Day of Week'",
"]",
"=",
"time_freq",
"[",
"'dow'",
"]",
"[",
"i",
"]",
"# Putting comment in json file to indicate which parameters have been changed",
"time_feature",
"=",
"None",
"for",
"key",
"in",
"time_freq",
":",
"if",
"time_freq",
"[",
"key",
"]",
"[",
"i",
"]",
":",
"time_feature",
"=",
"key",
"self",
".",
"result",
"[",
"'Comment'",
"]",
"=",
"'Freq: '",
"+",
"x",
"+",
"', '",
"+",
"'Time Feature: '",
"+",
"time_feature",
"# Read parameters in input_json",
"self",
".",
"read_json",
"(",
"file_name",
"=",
"None",
",",
"input_json",
"=",
"input_json",
",",
"imported_data",
"=",
"imported_data",
")",
"# Keep track of highest adj_r2 score",
"if",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'adj_r2'",
"]",
">",
"optimal_score",
":",
"optimal_score",
"=",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"[",
"'adj_r2'",
"]",
"optimal_model_file_name",
"=",
"self",
".",
"results_folder_name",
"+",
"'/results-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.json'",
"# Wrapper.global_count += 1",
"print",
"(",
"'Most optimal model: '",
",",
"optimal_model_file_name",
")",
"freq",
"=",
"self",
".",
"result",
"[",
"'Comment'",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"1",
"]",
"[",
":",
"-",
"1",
"]",
"time_feat",
"=",
"self",
".",
"result",
"[",
"'Comment'",
"]",
".",
"split",
"(",
"' '",
")",
"[",
"-",
"1",
"]",
"print",
"(",
"'Freq: '",
",",
"freq",
",",
"'Time Feature: '",
",",
"time_feat",
")"
] | Run models on different data configurations.
Note
----
The input json file should include ALL parameters.
Parameters
----------
file_name : str
Json file to read parameters from.
imported_data : pd.DataFrame()
Pandas Dataframe containing data; if provided, the CSV import step is skipped. | [
"Run",
"models",
"on",
"different",
"data",
"configurations",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L302-L373 | train | 234,958 |
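The sweep in search() is a plain grid: three resampling frequencies crossed with five one-hot time features, i.e. fifteen read_json()/model runs, with the best adjusted R2 tracked across them. A sketch of the same enumeration, using the names from the time_freq dict above:

from itertools import product

freqs = ['15T', 'h', 'd']
features = ['year', 'month', 'week', 'tod', 'dow']
for freq, feature in product(freqs, features):
    print(freq, feature)   # one configuration per run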
SoftwareDefinedBuildings/XBOS | apps/Data_quality_analysis/Wrapper.py | Wrapper.clean_data | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency, e.g. d, h, 15T...
resampler : str
Resampling type, e.g. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA, e.g. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to count as an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | python | def clean_data(self, data, rename_col=None, drop_col=None,
resample=True, freq='h', resampler='mean',
interpolate=True, limit=1, method='linear',
remove_na=True, remove_na_how='any',
remove_outliers=True, sd_val=3,
remove_out_of_bounds=True, low_bound=0, high_bound=float('inf'),
save_file=True):
""" Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency, e.g. d, h, 15T...
resampler : str
Resampling type, e.g. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA, e.g. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to count as an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise TypeError('data has to be a pandas dataframe.')
# Create instance and clean the data
clean_data_obj = Clean_Data(data)
clean_data_obj.clean_data(resample=resample, freq=freq, resampler=resampler,
interpolate=interpolate, limit=limit, method=method,
remove_na=remove_na, remove_na_how=remove_na_how,
remove_outliers=remove_outliers, sd_val=sd_val,
remove_out_of_bounds=remove_out_of_bounds, low_bound=low_bound, high_bound=high_bound)
# Correlation plot
# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)
# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')
if rename_col: # Rename columns of dataframe
clean_data_obj.rename_columns(rename_col)
if drop_col: # Drop columns of dataframe
clean_data_obj.drop_columns(drop_col)
# Store cleaned data in wrapper class
self.cleaned_data = clean_data_obj.cleaned_data
# Logging
self.result['Clean'] = {
'Rename Col': rename_col,
'Drop Col': drop_col,
'Resample': resample,
'Frequency': freq,
'Resampler': resampler,
'Interpolate': interpolate,
'Limit': limit,
'Method': method,
'Remove NA': remove_na,
'Remove NA How': remove_na_how,
'Remove Outliers': remove_outliers,
'SD Val': sd_val,
'Remove Out of Bounds': remove_out_of_bounds,
'Low Bound': low_bound,
'High Bound': str(high_bound) if high_bound == float('inf') else high_bound,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/cleaned_data-' + str(self.get_global_count()) + '.csv'
self.cleaned_data.to_csv(f)
self.result['Clean']['Saved File'] = f
else:
self.result['Clean']['Saved File'] = ''
return self.cleaned_data | [
"def",
"clean_data",
"(",
"self",
",",
"data",
",",
"rename_col",
"=",
"None",
",",
"drop_col",
"=",
"None",
",",
"resample",
"=",
"True",
",",
"freq",
"=",
"'h'",
",",
"resampler",
"=",
"'mean'",
",",
"interpolate",
"=",
"True",
",",
"limit",
"=",
"1",
",",
"method",
"=",
"'linear'",
",",
"remove_na",
"=",
"True",
",",
"remove_na_how",
"=",
"'any'",
",",
"remove_outliers",
"=",
"True",
",",
"sd_val",
"=",
"3",
",",
"remove_out_of_bounds",
"=",
"True",
",",
"low_bound",
"=",
"0",
",",
"high_bound",
"=",
"float",
"(",
"'inf'",
")",
",",
"save_file",
"=",
"True",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"TypeError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance and clean the data",
"clean_data_obj",
"=",
"Clean_Data",
"(",
"data",
")",
"clean_data_obj",
".",
"clean_data",
"(",
"resample",
"=",
"resample",
",",
"freq",
"=",
"freq",
",",
"resampler",
"=",
"resampler",
",",
"interpolate",
"=",
"interpolate",
",",
"limit",
"=",
"limit",
",",
"method",
"=",
"method",
",",
"remove_na",
"=",
"remove_na",
",",
"remove_na_how",
"=",
"remove_na_how",
",",
"remove_outliers",
"=",
"remove_outliers",
",",
"sd_val",
"=",
"sd_val",
",",
"remove_out_of_bounds",
"=",
"remove_out_of_bounds",
",",
"low_bound",
"=",
"low_bound",
",",
"high_bound",
"=",
"high_bound",
")",
"# Correlation plot",
"# fig = self.plot_data_obj.correlation_plot(clean_data_obj.cleaned_data)",
"# fig.savefig(self.results_folder_name + '/correlation_plot-' + str(Wrapper.global_count) + '.png')",
"if",
"rename_col",
":",
"# Rename columns of dataframe",
"clean_data_obj",
".",
"rename_columns",
"(",
"rename_col",
")",
"if",
"drop_col",
":",
"# Drop columns of dataframe",
"clean_data_obj",
".",
"drop_columns",
"(",
"drop_col",
")",
"# Store cleaned data in wrapper class",
"self",
".",
"cleaned_data",
"=",
"clean_data_obj",
".",
"cleaned_data",
"# Logging",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"=",
"{",
"'Rename Col'",
":",
"rename_col",
",",
"'Drop Col'",
":",
"drop_col",
",",
"'Resample'",
":",
"resample",
",",
"'Frequency'",
":",
"freq",
",",
"'Resampler'",
":",
"resampler",
",",
"'Interpolate'",
":",
"interpolate",
",",
"'Limit'",
":",
"limit",
",",
"'Method'",
":",
"method",
",",
"'Remove NA'",
":",
"remove_na",
",",
"'Remove NA How'",
":",
"remove_na_how",
",",
"'Remove Outliers'",
":",
"remove_outliers",
",",
"'SD Val'",
":",
"sd_val",
",",
"'Remove Out of Bounds'",
":",
"remove_out_of_bounds",
",",
"'Low Bound'",
":",
"low_bound",
",",
"'High Bound'",
":",
"str",
"(",
"high_bound",
")",
"if",
"high_bound",
"==",
"float",
"(",
"'inf'",
")",
"else",
"high_bound",
",",
"'Save File'",
":",
"save_file",
"}",
"if",
"save_file",
":",
"f",
"=",
"self",
".",
"results_folder_name",
"+",
"'/cleaned_data-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.csv'",
"self",
".",
"cleaned_data",
".",
"to_csv",
"(",
"f",
")",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"[",
"'Saved File'",
"]",
"=",
"f",
"else",
":",
"self",
".",
"result",
"[",
"'Clean'",
"]",
"[",
"'Saved File'",
"]",
"=",
"''",
"return",
"self",
".",
"cleaned_data"
] | Cleans dataframe according to user specifications and stores result in self.cleaned_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be cleaned.
rename_col : list(str)
List of new column names.
drop_col : list(str)
Columns to be dropped.
resample : bool
Indicates whether to resample data or not.
freq : str
Resampling frequency, e.g. d, h, 15T...
resampler : str
Resampling type, e.g. mean, max.
interpolate : bool
Indicates whether to interpolate data or not.
limit : int
Interpolation limit.
method : str
Interpolation method.
remove_na : bool
Indicates whether to remove NAs or not.
remove_na_how : str
Specifies how to remove NA, e.g. all, any...
remove_outliers : bool
Indicates whether to remove outliers or not.
sd_val : int
Standard Deviation Value (specifies how many SDs away a point must be to count as an outlier)
remove_out_of_bounds : bool
Indicates whether to remove out of bounds datapoints or not.
low_bound : int
Low bound of the data.
high_bound : int
High bound of the data.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing cleaned data. | [
"Cleans",
"dataframe",
"according",
"to",
"user",
"specifications",
"and",
"stores",
"result",
"in",
"self",
".",
"cleaned_data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/Data_quality_analysis/Wrapper.py#L439-L543 | train | 234,959 |
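A hedged sketch of the wrapper-level call above; df and the column names are placeholders. rename_col replaces the frame's column names and drop_col removes columns afterwards, per the documented order.

cleaned = wrapper.clean_data(df, rename_col=['OAT', 'Power'], drop_col=None,
                             freq='h', sd_val=3,
                             low_bound=0, high_bound=10000,
                             save_file=False)   # skip writing cleaned_data-<n>.csv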
SoftwareDefinedBuildings/XBOS | dashboards/sitedash/app.py | prevmonday | def prevmonday(num):
"""
Return unix SECOND timestamp of "num" mondays ago
"""
today = get_today()
lastmonday = today - timedelta(days=today.weekday(), weeks=num)
return lastmonday | python | def prevmonday(num):
"""
Return unix SECOND timestamp of "num" mondays ago
"""
today = get_today()
lastmonday = today - timedelta(days=today.weekday(), weeks=num)
return lastmonday | [
"def",
"prevmonday",
"(",
"num",
")",
":",
"today",
"=",
"get_today",
"(",
")",
"lastmonday",
"=",
"today",
"-",
"timedelta",
"(",
"days",
"=",
"today",
".",
"weekday",
"(",
")",
",",
"weeks",
"=",
"num",
")",
"return",
"lastmonday"
] | Return unix SECOND timestamp of "num" mondays ago | [
"Return",
"unix",
"SECOND",
"timestamp",
"of",
"num",
"mondays",
"ago"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/dashboards/sitedash/app.py#L74-L80 | train | 234,960 |
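One caveat on the record above: despite the docstring, prevmonday returns a date/datetime object (whatever get_today() yields, minus a timedelta), not a unix-second integer. If callers do need seconds, converting is one extra step:

import calendar

monday = prevmonday(1)                      # last week's Monday, as a date object
unix_seconds = calendar.timegm(monday.timetuple())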
SoftwareDefinedBuildings/XBOS | apps/consumption/iec.py | med_filt | def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | python | def med_filt(x, k=201):
"""Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints.
"""
if x.ndim > 1:
x = np.squeeze(x)
med = np.median(x)
assert k % 2 == 1, "Median filter length must be odd."
assert x.ndim == 1, "Input must be one-dimensional."
k2 = (k - 1) // 2
y = np.zeros((len(x), k), dtype=x.dtype)
y[:, k2] = x
for i in range(k2):
j = k2 - i
y[j:, i] = x[:-j]
y[:j, i] = x[0]
y[:-j, -(i + 1)] = x[j:]
y[-j:, -(i + 1)] = med
return np.median(y, axis=1) | [
"def",
"med_filt",
"(",
"x",
",",
"k",
"=",
"201",
")",
":",
"if",
"x",
".",
"ndim",
">",
"1",
":",
"x",
"=",
"np",
".",
"squeeze",
"(",
"x",
")",
"med",
"=",
"np",
".",
"median",
"(",
"x",
")",
"assert",
"k",
"%",
"2",
"==",
"1",
",",
"\"Median filter length must be odd.\"",
"assert",
"x",
".",
"ndim",
"==",
"1",
",",
"\"Input must be one-dimensional.\"",
"k2",
"=",
"(",
"k",
"-",
"1",
")",
"//",
"2",
"y",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"x",
")",
",",
"k",
")",
",",
"dtype",
"=",
"x",
".",
"dtype",
")",
"y",
"[",
":",
",",
"k2",
"]",
"=",
"x",
"for",
"i",
"in",
"range",
"(",
"k2",
")",
":",
"j",
"=",
"k2",
"-",
"i",
"y",
"[",
"j",
":",
",",
"i",
"]",
"=",
"x",
"[",
":",
"-",
"j",
"]",
"y",
"[",
":",
"j",
",",
"i",
"]",
"=",
"x",
"[",
"0",
"]",
"y",
"[",
":",
"-",
"j",
",",
"-",
"(",
"i",
"+",
"1",
")",
"]",
"=",
"x",
"[",
"j",
":",
"]",
"y",
"[",
"-",
"j",
":",
",",
"-",
"(",
"i",
"+",
"1",
")",
"]",
"=",
"med",
"return",
"np",
".",
"median",
"(",
"y",
",",
"axis",
"=",
"1",
")"
] | Apply a length-k median filter to a 1D array x.
Boundaries are extended by repeating endpoints. | [
"Apply",
"a",
"length",
"-",
"k",
"median",
"filter",
"to",
"a",
"1D",
"array",
"x",
".",
"Boundaries",
"are",
"extended",
"by",
"repeating",
"endpoints",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/consumption/iec.py#L114-L132 | train | 234,961 |
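A quick check of med_filt on synthetic data: a spike much narrower than the window is removed, and the boundary handling keeps the output the same length as the input.

import numpy as np

x = np.concatenate([np.ones(500), 10.0 * np.ones(5), np.ones(500)])
y = med_filt(x, k=201)      # k must be odd, per the assert in the function
assert y.shape == x.shape
assert y.max() == 1.0       # the 5-sample spike is gone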
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Wrapper.py | Wrapper.preprocess_data | def preprocess_data(self, data,
hdh_cpoint=65, cdh_cpoint=65, col_hdh_cdh=None,
col_degree=None, degree=None,
standardize=False, normalize=False,
year=False, month=False, week=False, tod=False, dow=False,
save_file=True):
""" Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
preprocess_data_obj = Preprocess_Data(data)
if col_hdh_cdh:
preprocess_data_obj.add_degree_days(col=col_hdh_cdh, hdh_cpoint=hdh_cpoint, cdh_cpoint=cdh_cpoint)
preprocess_data_obj.add_col_features(col=col_degree, degree=degree)
if standardize:
preprocess_data_obj.standardize()
if normalize:
preprocess_data_obj.normalize()
preprocess_data_obj.add_time_features(year=year, month=month, week=week, tod=tod, dow=dow)
# Store preprocessed data in wrapper class
self.preprocessed_data = preprocess_data_obj.preprocessed_data
# Logging
self.result['Preprocess'] = {
'HDH CPoint': hdh_cpoint,
'CDH CPoint': cdh_cpoint,
'HDH CDH Calc Col': col_hdh_cdh,
'Col Degree': col_degree,
'Degree': degree,
'Standardize': standardize,
'Normalize': normalize,
'Year': year,
'Month': month,
'Week': week,
'Time of Day': tod,
'Day of Week': dow,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/preprocessed_data-' + str(self.get_global_count()) + '.csv'
self.preprocessed_data.to_csv(f)
self.result['Preprocess']['Saved File'] = f
else:
self.result['Preprocess']['Saved File'] = ''
return self.preprocessed_data | python | def preprocess_data(self, data,
hdh_cpoint=65, cdh_cpoint=65, col_hdh_cdh=None,
col_degree=None, degree=None,
standardize=False, normalize=False,
year=False, month=False, week=False, tod=False, dow=False,
save_file=True):
""" Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
preprocess_data_obj = Preprocess_Data(data)
if col_hdh_cdh:
preprocess_data_obj.add_degree_days(col=col_hdh_cdh, hdh_cpoint=hdh_cpoint, cdh_cpoint=cdh_cpoint)
preprocess_data_obj.add_col_features(col=col_degree, degree=degree)
if standardize:
preprocess_data_obj.standardize()
if normalize:
preprocess_data_obj.normalize()
preprocess_data_obj.add_time_features(year=year, month=month, week=week, tod=tod, dow=dow)
# Store preprocessed data in wrapper class
self.preprocessed_data = preprocess_data_obj.preprocessed_data
# Logging
self.result['Preprocess'] = {
'HDH CPoint': hdh_cpoint,
'CDH CPoint': cdh_cpoint,
'HDH CDH Calc Col': col_hdh_cdh,
'Col Degree': col_degree,
'Degree': degree,
'Standardize': standardize,
'Normalize': normalize,
'Year': year,
'Month': month,
'Week': week,
'Time of Day': tod,
'Day of Week': dow,
'Save File': save_file
}
if save_file:
f = self.results_folder_name + '/preprocessed_data-' + str(self.get_global_count()) + '.csv'
self.preprocessed_data.to_csv(f)
self.result['Preprocess']['Saved File'] = f
else:
self.result['Preprocess']['Saved File'] = ''
return self.preprocessed_data | [
"def",
"preprocess_data",
"(",
"self",
",",
"data",
",",
"hdh_cpoint",
"=",
"65",
",",
"cdh_cpoint",
"=",
"65",
",",
"col_hdh_cdh",
"=",
"None",
",",
"col_degree",
"=",
"None",
",",
"degree",
"=",
"None",
",",
"standardize",
"=",
"False",
",",
"normalize",
"=",
"False",
",",
"year",
"=",
"False",
",",
"month",
"=",
"False",
",",
"week",
"=",
"False",
",",
"tod",
"=",
"False",
",",
"dow",
"=",
"False",
",",
"save_file",
"=",
"True",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"SystemError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance",
"preprocess_data_obj",
"=",
"Preprocess_Data",
"(",
"data",
")",
"if",
"col_hdh_cdh",
":",
"preprocess_data_obj",
".",
"add_degree_days",
"(",
"col",
"=",
"col_hdh_cdh",
",",
"hdh_cpoint",
"=",
"hdh_cpoint",
",",
"cdh_cpoint",
"=",
"cdh_cpoint",
")",
"preprocess_data_obj",
".",
"add_col_features",
"(",
"col",
"=",
"col_degree",
",",
"degree",
"=",
"degree",
")",
"if",
"standardize",
":",
"preprocess_data_obj",
".",
"standardize",
"(",
")",
"if",
"normalize",
":",
"preprocess_data_obj",
".",
"normalize",
"(",
")",
"preprocess_data_obj",
".",
"add_time_features",
"(",
"year",
"=",
"year",
",",
"month",
"=",
"month",
",",
"week",
"=",
"week",
",",
"tod",
"=",
"tod",
",",
"dow",
"=",
"dow",
")",
"# Store preprocessed data in wrapper class",
"self",
".",
"preprocessed_data",
"=",
"preprocess_data_obj",
".",
"preprocessed_data",
"# Logging",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"=",
"{",
"'HDH CPoint'",
":",
"hdh_cpoint",
",",
"'CDH CPoint'",
":",
"cdh_cpoint",
",",
"'HDH CDH Calc Col'",
":",
"col_hdh_cdh",
",",
"'Col Degree'",
":",
"col_degree",
",",
"'Degree'",
":",
"degree",
",",
"'Standardize'",
":",
"standardize",
",",
"'Normalize'",
":",
"normalize",
",",
"'Year'",
":",
"year",
",",
"'Month'",
":",
"month",
",",
"'Week'",
":",
"week",
",",
"'Time of Day'",
":",
"tod",
",",
"'Day of Week'",
":",
"dow",
",",
"'Save File'",
":",
"save_file",
"}",
"if",
"save_file",
":",
"f",
"=",
"self",
".",
"results_folder_name",
"+",
"'/preprocessed_data-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.csv'",
"self",
".",
"preprocessed_data",
".",
"to_csv",
"(",
"f",
")",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"[",
"'Saved File'",
"]",
"=",
"f",
"else",
":",
"self",
".",
"result",
"[",
"'Preprocess'",
"]",
"[",
"'Saved File'",
"]",
"=",
"''",
"return",
"self",
".",
"preprocessed_data"
] | Preprocesses dataframe according to user specifications and stores result in self.preprocessed_data.
Parameters
----------
data : pd.DataFrame()
Dataframe to be preprocessed.
hdh_cpoint : int
Heating degree hours. Defaults to 65.
cdh_cpoint : int
Cooling degree hours. Defaults to 65.
col_hdh_cdh : str
Column name which contains the outdoor air temperature.
col_degree : list(str)
Column to exponentiate.
degree : list(str)
Exponentiation degree.
standardize : bool
Standardize data.
normalize : bool
Normalize data.
year : bool
Year.
month : bool
Month.
week : bool
Week.
tod : bool
Time of Day.
dow : bool
Day of Week.
save_file : bool
Specifies whether to save file or not. Defaults to True.
Returns
-------
pd.DataFrame()
Dataframe containing preprocessed data. | [
"Preprocesses",
"dataframe",
"according",
"to",
"user",
"specifications",
"and",
"stores",
"result",
"in",
"self",
".",
"preprocessed_data",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Wrapper.py#L544-L634 | train | 234,962 |
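A hypothetical follow-on call adding degree-hour and time features to a cleaned frame; 'OAT' stands in for whatever column holds the outdoor air temperature.

pre = wrapper.preprocess_data(cleaned, col_hdh_cdh='OAT',
                              hdh_cpoint=65, cdh_cpoint=65,
                              tod=True, dow=True,
                              save_file=False)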
SoftwareDefinedBuildings/XBOS | apps/data_analysis/XBOS_data_analytics/Wrapper.py | Wrapper.model | def model(self, data,
ind_col=None, dep_col=None,
project_ind_col=None,
baseline_period=[None, None], projection_period=None, exclude_time_period=None,
alphas=np.logspace(-4,1,30),
cv=3, plot=True, figsize=None,
custom_model_func=None):
""" Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(float)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
model_data_obj = Model_Data(data, ind_col, dep_col, alphas, cv, exclude_time_period, baseline_period, projection_period)
# Split data into baseline and projection
model_data_obj.split_data()
# Logging
self.result['Model'] = {
'Independent Col': ind_col,
'Dependent Col': dep_col,
'Projection Independent Col': project_ind_col,
'Baseline Period': baseline_period,
'Projection Period': projection_period,
'Exclude Time Period': exclude_time_period,
'Alphas': list(alphas),
'CV': cv,
'Plot': plot,
'Fig Size': figsize
}
# Runs all models on the data and returns optimal model
all_metrics = model_data_obj.run_models()
self.result['Model']['All Model\'s Metrics'] = all_metrics
# CHECK: Define custom model's parameter and return types in documentation.
if custom_model_func:
self.result['Model']['Custom Model\'s Metrics'] = model_data_obj.custom_model(custom_model_func)
# Fit optimal model to data
self.result['Model']['Optimal Model\'s Metrics'] = model_data_obj.best_model_fit()
if plot:
# Use project_ind_col if projecting into the future (no input data other than weather data)
input_col = model_data_obj.input_col if not project_ind_col else project_ind_col
fig, y_true, y_pred = self.plot_data_obj.baseline_projection_plot(model_data_obj.y_true, model_data_obj.y_pred,
model_data_obj.baseline_period, model_data_obj.projection_period,
model_data_obj.best_model_name, model_data_obj.best_metrics['adj_r2'],
model_data_obj.original_data,
input_col, model_data_obj.output_col,
model_data_obj.best_model,
self.result['Site'])
fig.savefig(self.results_folder_name + '/baseline_projection_plot-' + str(self.get_global_count()) + '.png')
if not y_true.empty and not y_pred.empty:
saving_absolute = (y_pred - y_true).sum()
saving_perc = (saving_absolute / y_pred.sum()) * 100
self.result['Energy Savings (%)'] = float(saving_perc)
self.result['Energy Savings (absolute)'] = saving_absolute
# Temporary
self.project_df['true'] = y_true
self.project_df['pred'] = y_pred
# Calculate uncertainity of savings
self.result['Uncertainity'] = self.uncertainity_equation(model_data_obj, y_true, y_pred, 0.9)
else:
print('y_true: ', y_true)
print('y_pred: ', y_pred)
print('Error: y_true and y_pred are empty. Default to -1.0 savings.')
self.result['Energy Savings (%)'] = float(-1.0)
self.result['Energy Savings (absolute)'] = float(-1.0)
return self.best_metrics | python | def model(self, data,
ind_col=None, dep_col=None,
project_ind_col=None,
baseline_period=[None, None], projection_period=None, exclude_time_period=None,
alphas=np.logspace(-4,1,30),
cv=3, plot=True, figsize=None,
custom_model_func=None):
""" Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(float)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model.
"""
# Check to ensure data is a pandas dataframe
if not isinstance(data, pd.DataFrame):
raise SystemError('data has to be a pandas dataframe.')
# Create instance
model_data_obj = Model_Data(data, ind_col, dep_col, alphas, cv, exclude_time_period, baseline_period, projection_period)
# Split data into baseline and projection
model_data_obj.split_data()
# Logging
self.result['Model'] = {
'Independent Col': ind_col,
'Dependent Col': dep_col,
'Projection Independent Col': project_ind_col,
'Baseline Period': baseline_period,
'Projection Period': projection_period,
'Exclude Time Period': exclude_time_period,
'Alphas': list(alphas),
'CV': cv,
'Plot': plot,
'Fig Size': figsize
}
# Runs all models on the data and returns optimal model
all_metrics = model_data_obj.run_models()
self.result['Model']['All Model\'s Metrics'] = all_metrics
# CHECK: Define custom model's parameter and return types in documentation.
if custom_model_func:
self.result['Model']['Custom Model\'s Metrics'] = model_data_obj.custom_model(custom_model_func)
# Fit optimal model to data
self.result['Model']['Optimal Model\'s Metrics'] = model_data_obj.best_model_fit()
if plot:
# Use project_ind_col if projecting into the future (no input data other than weather data)
input_col = model_data_obj.input_col if not project_ind_col else project_ind_col
fig, y_true, y_pred = self.plot_data_obj.baseline_projection_plot(model_data_obj.y_true, model_data_obj.y_pred,
model_data_obj.baseline_period, model_data_obj.projection_period,
model_data_obj.best_model_name, model_data_obj.best_metrics['adj_r2'],
model_data_obj.original_data,
input_col, model_data_obj.output_col,
model_data_obj.best_model,
self.result['Site'])
fig.savefig(self.results_folder_name + '/baseline_projection_plot-' + str(self.get_global_count()) + '.png')
if not y_true.empty and not y_pred.empty:
saving_absolute = (y_pred - y_true).sum()
saving_perc = (saving_absolute / y_pred.sum()) * 100
self.result['Energy Savings (%)'] = float(saving_perc)
self.result['Energy Savings (absolute)'] = saving_absolute
# Temporary
self.project_df['true'] = y_true
self.project_df['pred'] = y_pred
# Calculate uncertainity of savings
self.result['Uncertainity'] = self.uncertainity_equation(model_data_obj, y_true, y_pred, 0.9)
else:
print('y_true: ', y_true)
print('y_pred: ', y_pred)
print('Error: y_true and y_pred are empty. Default to -1.0 savings.')
self.result['Energy Savings (%)'] = float(-1.0)
self.result['Energy Savings (absolute)'] = float(-1.0)
return self.best_metrics | [
"def",
"model",
"(",
"self",
",",
"data",
",",
"ind_col",
"=",
"None",
",",
"dep_col",
"=",
"None",
",",
"project_ind_col",
"=",
"None",
",",
"baseline_period",
"=",
"[",
"None",
",",
"None",
"]",
",",
"projection_period",
"=",
"None",
",",
"exclude_time_period",
"=",
"None",
",",
"alphas",
"=",
"np",
".",
"logspace",
"(",
"-",
"4",
",",
"1",
",",
"30",
")",
",",
"cv",
"=",
"3",
",",
"plot",
"=",
"True",
",",
"figsize",
"=",
"None",
",",
"custom_model_func",
"=",
"None",
")",
":",
"# Check to ensure data is a pandas dataframe",
"if",
"not",
"isinstance",
"(",
"data",
",",
"pd",
".",
"DataFrame",
")",
":",
"raise",
"SystemError",
"(",
"'data has to be a pandas dataframe.'",
")",
"# Create instance",
"model_data_obj",
"=",
"Model_Data",
"(",
"data",
",",
"ind_col",
",",
"dep_col",
",",
"alphas",
",",
"cv",
",",
"exclude_time_period",
",",
"baseline_period",
",",
"projection_period",
")",
"# Split data into baseline and projection",
"model_data_obj",
".",
"split_data",
"(",
")",
"# Logging",
"self",
".",
"result",
"[",
"'Model'",
"]",
"=",
"{",
"'Independent Col'",
":",
"ind_col",
",",
"'Dependent Col'",
":",
"dep_col",
",",
"'Projection Independent Col'",
":",
"project_ind_col",
",",
"'Baseline Period'",
":",
"baseline_period",
",",
"'Projection Period'",
":",
"projection_period",
",",
"'Exclude Time Period'",
":",
"exclude_time_period",
",",
"'Alphas'",
":",
"list",
"(",
"alphas",
")",
",",
"'CV'",
":",
"cv",
",",
"'Plot'",
":",
"plot",
",",
"'Fig Size'",
":",
"figsize",
"}",
"# Runs all models on the data and returns optimal model",
"all_metrics",
"=",
"model_data_obj",
".",
"run_models",
"(",
")",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'All Model\\'s Metrics'",
"]",
"=",
"all_metrics",
"# CHECK: Define custom model's parameter and return types in documentation.",
"if",
"custom_model_func",
":",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Custom Model\\'s Metrics'",
"]",
"=",
"model_data_obj",
".",
"custom_model",
"(",
"custom_model_func",
")",
"# Fit optimal model to data",
"self",
".",
"result",
"[",
"'Model'",
"]",
"[",
"'Optimal Model\\'s Metrics'",
"]",
"=",
"model_data_obj",
".",
"best_model_fit",
"(",
")",
"if",
"plot",
":",
"# Use project_ind_col if projecting into the future (no input data other than weather data)",
"input_col",
"=",
"model_data_obj",
".",
"input_col",
"if",
"not",
"project_ind_col",
"else",
"project_ind_col",
"fig",
",",
"y_true",
",",
"y_pred",
"=",
"self",
".",
"plot_data_obj",
".",
"baseline_projection_plot",
"(",
"model_data_obj",
".",
"y_true",
",",
"model_data_obj",
".",
"y_pred",
",",
"model_data_obj",
".",
"baseline_period",
",",
"model_data_obj",
".",
"projection_period",
",",
"model_data_obj",
".",
"best_model_name",
",",
"model_data_obj",
".",
"best_metrics",
"[",
"'adj_r2'",
"]",
",",
"model_data_obj",
".",
"original_data",
",",
"input_col",
",",
"model_data_obj",
".",
"output_col",
",",
"model_data_obj",
".",
"best_model",
",",
"self",
".",
"result",
"[",
"'Site'",
"]",
")",
"fig",
".",
"savefig",
"(",
"self",
".",
"results_folder_name",
"+",
"'/baseline_projection_plot-'",
"+",
"str",
"(",
"self",
".",
"get_global_count",
"(",
")",
")",
"+",
"'.png'",
")",
"if",
"not",
"y_true",
".",
"empty",
"and",
"not",
"y_pred",
".",
"empty",
":",
"saving_absolute",
"=",
"(",
"y_pred",
"-",
"y_true",
")",
".",
"sum",
"(",
")",
"saving_perc",
"=",
"(",
"saving_absolute",
"/",
"y_pred",
".",
"sum",
"(",
")",
")",
"*",
"100",
"self",
".",
"result",
"[",
"'Energy Savings (%)'",
"]",
"=",
"float",
"(",
"saving_perc",
")",
"self",
".",
"result",
"[",
"'Energy Savings (absolute)'",
"]",
"=",
"saving_absolute",
"# Temporary",
"self",
".",
"project_df",
"[",
"'true'",
"]",
"=",
"y_true",
"self",
".",
"project_df",
"[",
"'pred'",
"]",
"=",
"y_pred",
"# Calculate uncertainity of savings",
"self",
".",
"result",
"[",
"'Uncertainity'",
"]",
"=",
"self",
".",
"uncertainity_equation",
"(",
"model_data_obj",
",",
"y_true",
",",
"y_pred",
",",
"0.9",
")",
"else",
":",
"print",
"(",
"'y_true: '",
",",
"y_true",
")",
"print",
"(",
"'y_pred: '",
",",
"y_pred",
")",
"print",
"(",
"'Error: y_true and y_pred are empty. Default to -1.0 savings.'",
")",
"self",
".",
"result",
"[",
"'Energy Savings (%)'",
"]",
"=",
"float",
"(",
"-",
"1.0",
")",
"self",
".",
"result",
"[",
"'Energy Savings (absolute)'",
"]",
"=",
"float",
"(",
"-",
"1.0",
")",
"return",
"self",
".",
"best_metrics"
] | Split data into baseline and projection periods, run models on them and display metrics & plots.
Parameters
----------
data : pd.DataFrame()
Dataframe to model.
ind_col : list(str)
Independent column(s) of dataframe. Defaults to all columns except the last.
dep_col : str
Dependent column of dataframe.
project_ind_col : list(str)
Independent column(s) to use for projection. If none, use ind_col.
baseline_period : list(str)
List of time periods to split the data into baseline periods. It needs to have a start and an end date.
projection_period : list(str)
List of time periods to split the data into projection periods. It needs to have a start and an end date.
exclude_time_period : list(str)
List of time periods to exclude for modeling.
alphas : list(int)
List of alphas to run regression on.
cv : int
Number of folds for cross-validation.
plot : bool
Specifies whether to save plots or not.
figsize : tuple
Size of the plots.
custom_model_func : function
Model with specific hyper-parameters provided by user.
Returns
-------
dict
Metrics of the optimal/best model. | [
"Split",
"data",
"into",
"baseline",
"and",
"projection",
"periods",
"run",
"models",
"on",
"them",
"and",
"display",
"metrics",
"&",
"plots",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/apps/data_analysis/XBOS_data_analytics/Wrapper.py#L637-L749 | train | 234,963 |
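The energy-savings figures logged in the record above reduce to two pandas reductions over the projection period. A minimal sketch of that arithmetic, with hypothetical values (y_true is measured consumption, y_pred the baseline model's prediction):

import pandas as pd

y_true = pd.Series([100.0, 110.0, 95.0])   # measured use (hypothetical values)
y_pred = pd.Series([120.0, 115.0, 105.0])  # baseline-model prediction (hypothetical)

saving_absolute = (y_pred - y_true).sum()             # 35.0 units avoided
saving_perc = (saving_absolute / y_pred.sum()) * 100  # ~10.3% of predicted use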
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | make_dataframe | def make_dataframe(result):
"""
Turns the results of one of the data API calls into a pandas dataframe
"""
import pandas as pd
ret = {}
if isinstance(result,dict):
if 'timeseries' in result:
result = result['timeseries']
for uuid, data in result.items():
df = pd.DataFrame(data)
if len(df.columns) == 5: # statistical data
df.columns = ['time','min','mean','max','count']
else:
df.columns = ['time','value']
df['time'] = pd.to_datetime(df['time'],unit='ns')
df = df.set_index(df.pop('time'))
ret[uuid] = df
return ret | python | def make_dataframe(result):
"""
Turns the results of one of the data API calls into a pandas dataframe
"""
import pandas as pd
ret = {}
if isinstance(result,dict):
if 'timeseries' in result:
result = result['timeseries']
for uuid, data in result.items():
df = pd.DataFrame(data)
if len(df.columns) == 5: # statistical data
df.columns = ['time','min','mean','max','count']
else:
df.columns = ['time','value']
df['time'] = pd.to_datetime(df['time'],unit='ns')
df = df.set_index(df.pop('time'))
ret[uuid] = df
return ret | [
"def",
"make_dataframe",
"(",
"result",
")",
":",
"import",
"pandas",
"as",
"pd",
"ret",
"=",
"{",
"}",
"if",
"isinstance",
"(",
"result",
",",
"dict",
")",
":",
"if",
"'timeseries'",
"in",
"result",
":",
"result",
"=",
"result",
"[",
"'timeseries'",
"]",
"for",
"uuid",
",",
"data",
"in",
"result",
".",
"items",
"(",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"data",
")",
"if",
"len",
"(",
"df",
".",
"columns",
")",
"==",
"5",
":",
"# statistical data",
"df",
".",
"columns",
"=",
"[",
"'time'",
",",
"'min'",
",",
"'mean'",
",",
"'max'",
",",
"'count'",
"]",
"else",
":",
"df",
".",
"columns",
"=",
"[",
"'time'",
",",
"'value'",
"]",
"df",
"[",
"'time'",
"]",
"=",
"pd",
".",
"to_datetime",
"(",
"df",
"[",
"'time'",
"]",
",",
"unit",
"=",
"'ns'",
")",
"df",
"=",
"df",
".",
"set_index",
"(",
"df",
".",
"pop",
"(",
"'time'",
")",
")",
"ret",
"[",
"uuid",
"]",
"=",
"df",
"return",
"ret"
] | Turns the results of one of the data API calls into a pandas dataframe | [
"Turns",
"the",
"results",
"of",
"one",
"of",
"the",
"data",
"API",
"calls",
"into",
"a",
"pandas",
"dataframe"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L247-L265 | train | 234,964 |
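A usage sketch for make_dataframe, assuming resp is the dict returned by DataClient.query() in the next record (per the isinstance check, either the full response or just its 'timeseries' mapping is accepted):

dfs = make_dataframe(resp)
for uuid, df in dfs.items():
    # statistical streams carry min/mean/max/count columns; raw streams just 'value'
    print(uuid, list(df.columns), df.index[0])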
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.query | def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if archiver == "":
archiver = self.archivers[0]
nonce = random.randint(0, 2**32)
ev = threading.Event()
response = {}
def _handleresult(msg):
# decode, throw away if not correct nonce
got_response = False
error = getError(nonce, msg)
if error is not None:
got_response = True
response["error"] = error
metadata = getMetadata(nonce, msg)
if metadata is not None:
got_response = True
response["metadata"] = metadata
timeseries = getTimeseries(nonce, msg)
if timeseries is not None:
got_response = True
response["timeseries"] = timeseries
if got_response:
ev.set()
vk = self.vk[:-1] # remove last part of VK because archiver doesn't expect it
# set up receiving
self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
# execute query
q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
po = PayloadObject((2,0,8,1), None, q_struct)
self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
ev.wait(timeout)
if len(response) == 0: # no results
raise TimeoutException("Query of {0} timed out".format(query))
return response | python | def query(self, query, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if archiver == "":
archiver = self.archivers[0]
nonce = random.randint(0, 2**32)
ev = threading.Event()
response = {}
def _handleresult(msg):
# decode, throw away if not correct nonce
got_response = False
error = getError(nonce, msg)
if error is not None:
got_response = True
response["error"] = error
metadata = getMetadata(nonce, msg)
if metadata is not None:
got_response = True
response["metadata"] = metadata
timeseries = getTimeseries(nonce, msg)
if timeseries is not None:
got_response = True
response["timeseries"] = timeseries
if got_response:
ev.set()
vk = self.vk[:-1] # remove last part of VK because archiver doesn't expect it
# set up receiving
self.c.subscribe("{0}/s.giles/_/i.archiver/signal/{1},queries".format(archiver, vk), _handleresult)
# execute query
q_struct = msgpack.packb({"Query": query, "Nonce": nonce})
po = PayloadObject((2,0,8,1), None, q_struct)
self.c.publish("{0}/s.giles/_/i.archiver/slot/query".format(archiver), payload_objects=(po,))
ev.wait(timeout)
if len(response) == 0: # no results
raise TimeoutException("Query of {0} timed out".format(query))
return response | [
"def",
"query",
"(",
"self",
",",
"query",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"archiver",
"==",
"\"\"",
":",
"archiver",
"=",
"self",
".",
"archivers",
"[",
"0",
"]",
"nonce",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"2",
"**",
"32",
")",
"ev",
"=",
"threading",
".",
"Event",
"(",
")",
"response",
"=",
"{",
"}",
"def",
"_handleresult",
"(",
"msg",
")",
":",
"# decode, throw away if not correct nonce",
"got_response",
"=",
"False",
"error",
"=",
"getError",
"(",
"nonce",
",",
"msg",
")",
"if",
"error",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"error\"",
"]",
"=",
"error",
"metadata",
"=",
"getMetadata",
"(",
"nonce",
",",
"msg",
")",
"if",
"metadata",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"metadata\"",
"]",
"=",
"metadata",
"timeseries",
"=",
"getTimeseries",
"(",
"nonce",
",",
"msg",
")",
"if",
"timeseries",
"is",
"not",
"None",
":",
"got_response",
"=",
"True",
"response",
"[",
"\"timeseries\"",
"]",
"=",
"timeseries",
"if",
"got_response",
":",
"ev",
".",
"set",
"(",
")",
"vk",
"=",
"self",
".",
"vk",
"[",
":",
"-",
"1",
"]",
"# remove last part of VK because archiver doesn't expect it",
"# set up receiving",
"self",
".",
"c",
".",
"subscribe",
"(",
"\"{0}/s.giles/_/i.archiver/signal/{1},queries\"",
".",
"format",
"(",
"archiver",
",",
"vk",
")",
",",
"_handleresult",
")",
"# execute query",
"q_struct",
"=",
"msgpack",
".",
"packb",
"(",
"{",
"\"Query\"",
":",
"query",
",",
"\"Nonce\"",
":",
"nonce",
"}",
")",
"po",
"=",
"PayloadObject",
"(",
"(",
"2",
",",
"0",
",",
"8",
",",
"1",
")",
",",
"None",
",",
"q_struct",
")",
"self",
".",
"c",
".",
"publish",
"(",
"\"{0}/s.giles/_/i.archiver/slot/query\"",
".",
"format",
"(",
"archiver",
")",
",",
"payload_objects",
"=",
"(",
"po",
",",
")",
")",
"ev",
".",
"wait",
"(",
"timeout",
")",
"if",
"len",
"(",
"response",
")",
"==",
"0",
":",
"# no results",
"raise",
"TimeoutException",
"(",
"\"Query of {0} timed out\"",
".",
"format",
"(",
"query",
")",
")",
"return",
"response"
] | Runs the given pundat query and returns the results as a Python object.
Arguments:
[query]: the query string
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Runs",
"the",
"given",
"pundat",
"query",
"and",
"returns",
"the",
"results",
"as",
"a",
"Python",
"object",
"."
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L61-L111 | train | 234,965 |
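A calling sketch for the nonce-matched request/response round trip above, assuming client is a DataClient built with at least one archiver URI; the where clause is a placeholder, and TimeoutException is the module's own class raised on an empty response:

try:
    resp = client.query('select uuid where has Metadata/Site', timeout=30)
except TimeoutException:
    resp = None  # the archiver never published a matching nonce within 30 s
if resp and 'error' not in resp:
    print(resp.get('metadata', []))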
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.uuids | def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
resp = self.query("select uuid where {0}".format(where), archiver, timeout)
uuids = []
for r in resp["metadata"]:
uuids.append(r["uuid"])
return uuids | python | def uuids(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
resp = self.query("select uuid where {0}".format(where), archiver, timeout)
uuids = []
for r in resp["metadata"]:
uuids.append(r["uuid"])
return uuids | [
"def",
"uuids",
"(",
"self",
",",
"where",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"resp",
"=",
"self",
".",
"query",
"(",
"\"select uuid where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
"uuids",
"=",
"[",
"]",
"for",
"r",
"in",
"resp",
"[",
"\"metadata\"",
"]",
":",
"uuids",
".",
"append",
"(",
"r",
"[",
"\"uuid\"",
"]",
")",
"return",
"uuids"
] | Using the given where-clause, finds all UUIDs that match
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Using",
"the",
"given",
"where",
"-",
"clause",
"finds",
"all",
"UUIDs",
"that",
"match"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L113-L127 | train | 234,966 |
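Since uuids() is a thin wrapper over query() that extracts the 'uuid' key from each metadata record, a sketch of typical use (the where clause is hypothetical; tags() in the next record composes the same way):

ids = client.uuids('SourceName = "TED Main"')  # -> list of UUID strings
for uid in ids:
    print(uid)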
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.tags | def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | python | def tags(self, where, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | [
"def",
"tags",
"(",
"self",
",",
"where",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select * where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")"
] | Retrieves tags for all streams matching the given WHERE clause
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Retrieves",
"tags",
"for",
"all",
"streams",
"matching",
"the",
"given",
"WHERE",
"clause"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L129-L139 | train | 234,967 |
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.tags_uuids | def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | python | def tags_uuids(self, uuids, archiver="", timeout=DEFAULT_TIMEOUT):
"""
Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select * where {0}".format(where), archiver, timeout).get('metadata',{}) | [
"def",
"tags_uuids",
"(",
"self",
",",
"uuids",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"not",
"isinstance",
"(",
"uuids",
",",
"list",
")",
":",
"uuids",
"=",
"[",
"uuids",
"]",
"where",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"'uuid = \"{0}\"'",
".",
"format",
"(",
"uuid",
")",
"for",
"uuid",
"in",
"uuids",
"]",
")",
"return",
"self",
".",
"query",
"(",
"\"select * where {0}\"",
".",
"format",
"(",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")"
] | Retrieves tags for all streams with the provided UUIDs
Arguments:
[uuids]: list of UUIDs
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"Retrieves",
"tags",
"for",
"all",
"streams",
"with",
"the",
"provided",
"UUIDs"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L141-L154 | train | 234,968 |
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.data | def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | python | def data(self, where, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | [
"def",
"data",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all RAW data between the 2 given timestamps
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"RAW",
"data",
"between",
"the",
"2",
"given",
"timestamps"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L156-L167 | train | 234,969 |
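A sketch combining data() with make_dataframe() from an earlier record. The start/end literals are placeholders; the docstring above does not spell out the accepted time-reference formats:

start, end = '2017-01-01 00:00:00', '2017-01-08 00:00:00'  # assumed format
raw = client.data('SourceName = "TED Main"', start, end)
dfs = make_dataframe(raw)  # one DataFrame per matching UUID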
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.data_uuids | def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | python | def data_uuids(self, uuids, start, end, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
if not isinstance(uuids, list):
uuids = [uuids]
where = " or ".join(['uuid = "{0}"'.format(uuid) for uuid in uuids])
return self.query("select data in ({0}, {1}) where {2}".format(start, end, where), archiver, timeout).get('timeseries',{}) | [
"def",
"data_uuids",
"(",
"self",
",",
"uuids",
",",
"start",
",",
"end",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"if",
"not",
"isinstance",
"(",
"uuids",
",",
"list",
")",
":",
"uuids",
"=",
"[",
"uuids",
"]",
"where",
"=",
"\" or \"",
".",
"join",
"(",
"[",
"'uuid = \"{0}\"'",
".",
"format",
"(",
"uuid",
")",
"for",
"uuid",
"in",
"uuids",
"]",
")",
"return",
"self",
".",
"query",
"(",
"\"select data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given list of UUIDs, retrieves all RAW data between the 2 given timestamps
Arguments:
[uuids]: list of UUIDs
[start, end]: time references:
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"list",
"of",
"UUIDs",
"retrieves",
"all",
"RAW",
"data",
"between",
"the",
"2",
"given",
"timestamps"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L169-L183 | train | 234,970 |
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.stats | def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select statistical({3}) data in ({0}, {1}) where {2}".format(start, end, where, pw), archiver, timeout).get('timeseries',{}) | python | def stats(self, where, start, end, pw, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select statistical({3}) data in ({0}, {1}) where {2}".format(start, end, where, pw), archiver, timeout).get('timeseries',{}) | [
"def",
"stats",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"pw",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select statistical({3}) data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
",",
"pw",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given pointwidth
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[pw]: pointwidth (window size of 2^pw nanoseconds)
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"statistical",
"data",
"between",
"the",
"2",
"given",
"timestamps",
"using",
"the",
"given",
"pointwidth"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L185-L197 | train | 234,971 |
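The pointwidth pw above selects aligned windows of 2**pw nanoseconds, so picking pw for a target resolution is a log2 calculation. A small sketch:

import math

def closest_pw(seconds):
    # nearest power-of-two window, in nanoseconds, to the requested size
    return round(math.log2(seconds * 1e9))

print(closest_pw(3600))  # 42: 2**42 ns is roughly 73 minutes, the closest power of two to an hour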
SoftwareDefinedBuildings/XBOS | python/xbos/services/pundat.py | DataClient.window | def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select window({3}) data in ({0}, {1}) where {2}".format(start, end, where, width), archiver, timeout).get('timeseries',{}) | python | def window(self, where, start, end, width, archiver="", timeout=DEFAULT_TIMEOUT):
"""
With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver
"""
return self.query("select window({3}) data in ({0}, {1}) where {2}".format(start, end, where, width), archiver, timeout).get('timeseries',{}) | [
"def",
"window",
"(",
"self",
",",
"where",
",",
"start",
",",
"end",
",",
"width",
",",
"archiver",
"=",
"\"\"",
",",
"timeout",
"=",
"DEFAULT_TIMEOUT",
")",
":",
"return",
"self",
".",
"query",
"(",
"\"select window({3}) data in ({0}, {1}) where {2}\"",
".",
"format",
"(",
"start",
",",
"end",
",",
"where",
",",
"width",
")",
",",
"archiver",
",",
"timeout",
")",
".",
"get",
"(",
"'timeseries'",
",",
"{",
"}",
")"
] | With the given WHERE clause, retrieves all statistical data between the 2 given timestamps, using the given window size
Arguments:
[where]: the where clause (e.g. 'path like "keti"', 'SourceName = "TED Main"')
[start, end]: time references:
[width]: a time expression for the window size, e.g. "5s", "365d"
[archiver]: if specified, this is the archiver to use. Else, it will run on the first archiver passed
into the constructor for the client
[timeout]: time in seconds to wait for a response from the archiver | [
"With",
"the",
"given",
"WHERE",
"clause",
"retrieves",
"all",
"statistical",
"data",
"between",
"the",
"2",
"given",
"timestamps",
"using",
"the",
"given",
"window",
"size"
] | c12d4fb14518ea3ae98c471c28e0710fdf74dd25 | https://github.com/SoftwareDefinedBuildings/XBOS/blob/c12d4fb14518ea3ae98c471c28e0710fdf74dd25/python/xbos/services/pundat.py#L216-L228 | train | 234,972 |
Danielhiversen/flux_led | flux_led/__main__.py | WifiLedBulb.brightness | def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v | python | def brightness(self):
"""Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'.
"""
if self.mode == "ww":
return int(self.raw_state[9])
else:
_, _, v = colorsys.rgb_to_hsv(*self.getRgb())
return v | [
"def",
"brightness",
"(",
"self",
")",
":",
"if",
"self",
".",
"mode",
"==",
"\"ww\"",
":",
"return",
"int",
"(",
"self",
".",
"raw_state",
"[",
"9",
"]",
")",
"else",
":",
"_",
",",
"_",
",",
"v",
"=",
"colorsys",
".",
"rgb_to_hsv",
"(",
"*",
"self",
".",
"getRgb",
"(",
")",
")",
"return",
"v"
] | Return current brightness 0-255.
For warm white return current led level. For RGB
calculate the HSV and return the 'value'. | [
"Return",
"current",
"brightness",
"0",
"-",
"255",
"."
] | 13e87e06ff7589356c83e084a6be768ad1290557 | https://github.com/Danielhiversen/flux_led/blob/13e87e06ff7589356c83e084a6be768ad1290557/flux_led/__main__.py#L544-L554 | train | 234,973 |
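A note on the RGB branch above: colorsys nominally works on 0-1 floats, but rgb_to_hsv's 'value' is just max(r, g, b), so feeding it raw 0-255 channel values (as getRgb() presumably returns) yields a brightness already on the 0-255 scale:

import colorsys

_, _, v = colorsys.rgb_to_hsv(255, 128, 0)
print(v)  # 255 -- the max channel, i.e. full brightness for this color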
kyrus/python-junit-xml | junit_xml/__init__.py | decode | def decode(var, encoding):
"""
If not already unicode, decode it.
"""
if PY2:
if isinstance(var, unicode):
ret = var
elif isinstance(var, str):
if encoding:
ret = var.decode(encoding)
else:
ret = unicode(var)
else:
ret = unicode(var)
else:
ret = str(var)
return ret | python | def decode(var, encoding):
"""
If not already unicode, decode it.
"""
if PY2:
if isinstance(var, unicode):
ret = var
elif isinstance(var, str):
if encoding:
ret = var.decode(encoding)
else:
ret = unicode(var)
else:
ret = unicode(var)
else:
ret = str(var)
return ret | [
"def",
"decode",
"(",
"var",
",",
"encoding",
")",
":",
"if",
"PY2",
":",
"if",
"isinstance",
"(",
"var",
",",
"unicode",
")",
":",
"ret",
"=",
"var",
"elif",
"isinstance",
"(",
"var",
",",
"str",
")",
":",
"if",
"encoding",
":",
"ret",
"=",
"var",
".",
"decode",
"(",
"encoding",
")",
"else",
":",
"ret",
"=",
"unicode",
"(",
"var",
")",
"else",
":",
"ret",
"=",
"unicode",
"(",
"var",
")",
"else",
":",
"ret",
"=",
"str",
"(",
"var",
")",
"return",
"ret"
] | If not already unicode, decode it. | [
"If",
"not",
"already",
"unicode",
"decode",
"it",
"."
] | 9bb2675bf0058742da04285dcdcf8781eee03db0 | https://github.com/kyrus/python-junit-xml/blob/9bb2675bf0058742da04285dcdcf8781eee03db0/junit_xml/__init__.py#L57-L73 | train | 234,974 |
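A behavior sketch for decode(): on Python 3 every input is passed through str(), so bytes are not actually decoded (they render as "b'...'"); the encoding argument only takes effect on Python 2, where str is the byte type:

print(decode(b'ok', 'utf-8'))
# Python 3 -> "b'ok'" (str() of the bytes object)
# Python 2 -> u'ok'   (decoded via the given encoding)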
esheldon/fitsio | fitsio/util.py | cfitsio_version | def cfitsio_version(asfloat=False):
"""
Return the cfitsio version as a string.
"""
# use string version to avoid roundoffs
ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
if asfloat:
return float(ver)
else:
return ver | python | def cfitsio_version(asfloat=False):
"""
Return the cfitsio version as a string.
"""
# use string version to avoid roundoffs
ver = '%0.3f' % _fitsio_wrap.cfitsio_version()
if asfloat:
return float(ver)
else:
return ver | [
"def",
"cfitsio_version",
"(",
"asfloat",
"=",
"False",
")",
":",
"# use string version to avoid roundoffs",
"ver",
"=",
"'%0.3f'",
"%",
"_fitsio_wrap",
".",
"cfitsio_version",
"(",
")",
"if",
"asfloat",
":",
"return",
"float",
"(",
"ver",
")",
"else",
":",
"return",
"ver"
] | Return the cfitsio version as a string. | [
"Return",
"the",
"cfitsio",
"version",
"as",
"a",
"string",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L19-L28 | train | 234,975 |
esheldon/fitsio | fitsio/util.py | is_little_endian | def is_little_endian(array):
"""
Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big or little endian. The input must be a simple numpy
array, not an array with fields.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
byteorder = array.dtype.base.byteorder
return (byteorder == '<') or (machine_little and byteorder == '=') | python | def is_little_endian(array):
"""
Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big or little endian. The input must be a simple numpy
array, not an array with fields.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
byteorder = array.dtype.base.byteorder
return (byteorder == '<') or (machine_little and byteorder == '=') | [
"def",
"is_little_endian",
"(",
"array",
")",
":",
"if",
"numpy",
".",
"little_endian",
":",
"machine_little",
"=",
"True",
"else",
":",
"machine_little",
"=",
"False",
"byteorder",
"=",
"array",
".",
"dtype",
".",
"base",
".",
"byteorder",
"return",
"(",
"byteorder",
"==",
"'<'",
")",
"or",
"(",
"machine_little",
"and",
"byteorder",
"==",
"'='",
")"
] | Return True if array is little endian, False otherwise.
Parameters
----------
array: numpy array
A numerical python array.
Returns
-------
Truth value:
True for little-endian
Notes
-----
Strings are neither big nor little endian. The input must be a simple numpy
array, not an array with fields. | [
"Return",
"True",
"if",
"array",
"is",
"little",
"endian",
"False",
"otherwise",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L73-L98 | train | 234,976 |
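A quick check of the byteorder logic above on a little-endian host; note that the native marker '=' also counts as little-endian there:

import numpy

a = numpy.zeros(3, dtype='<i4')  # explicit little-endian
b = numpy.zeros(3, dtype='>i4')  # explicit big-endian
c = numpy.zeros(3, dtype='i4')   # native order, byteorder '='
print(is_little_endian(a), is_little_endian(b), is_little_endian(c))
# -> True False True on little-endian hardware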
esheldon/fitsio | fitsio/util.py | array_to_native | def array_to_native(array, inplace=False):
"""
Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
data_little = False
if array.dtype.names is None:
if array.dtype.base.byteorder == '|':
# strings and 1 byte integers
return array
data_little = is_little_endian(array)
else:
# assume all are same byte order: we only need to find one with
# little endian
for fname in array.dtype.names:
if is_little_endian(array[fname]):
data_little = True
break
if ((machine_little and not data_little)
or (not machine_little and data_little)):
output = array.byteswap(inplace)
else:
output = array
return output | python | def array_to_native(array, inplace=False):
"""
Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used.
"""
if numpy.little_endian:
machine_little = True
else:
machine_little = False
data_little = False
if array.dtype.names is None:
if array.dtype.base.byteorder == '|':
# strings and 1 byte integers
return array
data_little = is_little_endian(array)
else:
# assume all are same byte order: we only need to find one with
# little endian
for fname in array.dtype.names:
if is_little_endian(array[fname]):
data_little = True
break
if ((machine_little and not data_little)
or (not machine_little and data_little)):
output = array.byteswap(inplace)
else:
output = array
return output | [
"def",
"array_to_native",
"(",
"array",
",",
"inplace",
"=",
"False",
")",
":",
"if",
"numpy",
".",
"little_endian",
":",
"machine_little",
"=",
"True",
"else",
":",
"machine_little",
"=",
"False",
"data_little",
"=",
"False",
"if",
"array",
".",
"dtype",
".",
"names",
"is",
"None",
":",
"if",
"array",
".",
"dtype",
".",
"base",
".",
"byteorder",
"==",
"'|'",
":",
"# strings and 1 byte integers",
"return",
"array",
"data_little",
"=",
"is_little_endian",
"(",
"array",
")",
"else",
":",
"# assume all are same byte order: we only need to find one with",
"# little endian",
"for",
"fname",
"in",
"array",
".",
"dtype",
".",
"names",
":",
"if",
"is_little_endian",
"(",
"array",
"[",
"fname",
"]",
")",
":",
"data_little",
"=",
"True",
"break",
"if",
"(",
"(",
"machine_little",
"and",
"not",
"data_little",
")",
"or",
"(",
"not",
"machine_little",
"and",
"data_little",
")",
")",
":",
"output",
"=",
"array",
".",
"byteswap",
"(",
"inplace",
")",
"else",
":",
"output",
"=",
"array",
"return",
"output"
] | Convert an array to the native byte order.
NOTE: the inplace keyword argument is not currently used. | [
"Convert",
"an",
"array",
"to",
"the",
"native",
"byte",
"order",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L101-L134 | train | 234,977 |
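One subtlety worth a sketch: byteswap() swaps the underlying bytes but leaves the dtype label untouched, so the output is aimed at C code that wants native-order memory, not at Python-level reinterpretation:

import numpy

big = numpy.arange(4, dtype='>i4')
out = array_to_native(big)
print(out.dtype)  # still >i4 -- only the raw buffer changed
print(out.tobytes() == numpy.arange(4, dtype='<i4').tobytes())  # True on a little-endian host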
esheldon/fitsio | fitsio/util.py | mks | def mks(val):
"""
make sure the value is a string, paying mind to python3 vs 2
"""
if sys.version_info > (3, 0, 0):
if isinstance(val, bytes):
sval = str(val, 'utf-8')
else:
sval = str(val)
else:
sval = str(val)
return sval | python | def mks(val):
"""
make sure the value is a string, paying mind to python3 vs 2
"""
if sys.version_info > (3, 0, 0):
if isinstance(val, bytes):
sval = str(val, 'utf-8')
else:
sval = str(val)
else:
sval = str(val)
return sval | [
"def",
"mks",
"(",
"val",
")",
":",
"if",
"sys",
".",
"version_info",
">",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"if",
"isinstance",
"(",
"val",
",",
"bytes",
")",
":",
"sval",
"=",
"str",
"(",
"val",
",",
"'utf-8'",
")",
"else",
":",
"sval",
"=",
"str",
"(",
"val",
")",
"else",
":",
"sval",
"=",
"str",
"(",
"val",
")",
"return",
"sval"
] | make sure the value is a string, paying mind to python3 vs 2 | [
"make",
"sure",
"the",
"value",
"is",
"a",
"string",
"paying",
"mind",
"to",
"python3",
"vs",
"2"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/util.py#L143-L155 | train | 234,978 |
esheldon/fitsio | fitsio/hdu/table.py | _get_col_dimstr | def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr | python | def _get_col_dimstr(tdim, is_string=False):
"""
not for variable length
"""
dimstr = ''
if tdim is None:
dimstr = 'array[bad TDIM]'
else:
if is_string:
if len(tdim) > 1:
dimstr = [str(d) for d in tdim[1:]]
else:
if len(tdim) > 1 or tdim[0] > 1:
dimstr = [str(d) for d in tdim]
if dimstr != '':
dimstr = ','.join(dimstr)
dimstr = 'array[%s]' % dimstr
return dimstr | [
"def",
"_get_col_dimstr",
"(",
"tdim",
",",
"is_string",
"=",
"False",
")",
":",
"dimstr",
"=",
"''",
"if",
"tdim",
"is",
"None",
":",
"dimstr",
"=",
"'array[bad TDIM]'",
"else",
":",
"if",
"is_string",
":",
"if",
"len",
"(",
"tdim",
")",
">",
"1",
":",
"dimstr",
"=",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"tdim",
"[",
"1",
":",
"]",
"]",
"else",
":",
"if",
"len",
"(",
"tdim",
")",
">",
"1",
"or",
"tdim",
"[",
"0",
"]",
">",
"1",
":",
"dimstr",
"=",
"[",
"str",
"(",
"d",
")",
"for",
"d",
"in",
"tdim",
"]",
"if",
"dimstr",
"!=",
"''",
":",
"dimstr",
"=",
"','",
".",
"join",
"(",
"dimstr",
")",
"dimstr",
"=",
"'array[%s]'",
"%",
"dimstr",
"return",
"dimstr"
] | not for variable length | [
"not",
"for",
"variable",
"length"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L2019-L2037 | train | 234,979 |
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_colname | def get_colname(self, colnum):
"""
Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset
"""
if colnum < 0 or colnum > (len(self._colnames)-1):
raise ValueError(
"colnum out of range [0,%s-1]" % len(self._colnames))
return self._colnames[colnum] | python | def get_colname(self, colnum):
"""
Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset
"""
if colnum < 0 or colnum > (len(self._colnames)-1):
raise ValueError(
"colnum out of range [0,%s-1]" % len(self._colnames))
return self._colnames[colnum] | [
"def",
"get_colname",
"(",
"self",
",",
"colnum",
")",
":",
"if",
"colnum",
"<",
"0",
"or",
"colnum",
">",
"(",
"len",
"(",
"self",
".",
"_colnames",
")",
"-",
"1",
")",
":",
"raise",
"ValueError",
"(",
"\"colnum out of range [0,%s-1]\"",
"%",
"(",
"0",
",",
"len",
"(",
"self",
".",
"_colnames",
")",
")",
")",
"return",
"self",
".",
"_colnames",
"[",
"colnum",
"]"
] | Get the name associated with the given column number
parameters
----------
colnum: integer
The number for the column, zero offset | [
"Get",
"the",
"name",
"associated",
"with",
"the",
"given",
"column",
"number"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L84-L96 | train | 234,980 |
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.write_column | def write_column(self, column, data, **keys):
"""
Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better off using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
firstrow = keys.get('firstrow', 0)
colnum = self._extract_colnum(column)
# need it to be contiguous and native byte order. For now, make a
# copy, but we may be able to avoid this with some care.
if not data.flags['C_CONTIGUOUS']:
# this always makes a copy
data_send = numpy.ascontiguousarray(data)
# this is a copy, we can make sure it is native
# and modify in place if needed
array_to_native(data_send, inplace=True)
else:
# we can avoid the copy with a try-finally block and
# some logic
data_send = array_to_native(data, inplace=False)
if IS_PY3 and data_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
data_send = data_send.astype('S', copy=False)
self._verify_column_data(colnum, data_send)
self._FITS.write_column(
self._ext+1, colnum+1, data_send,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
del data_send
self._update_info() | python | def write_column(self, column, data, **keys):
"""
Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
column: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
firstrow = keys.get('firstrow', 0)
colnum = self._extract_colnum(column)
# need it to be contiguous and native byte order. For now, make a
# copy, but we may be able to avoid this with some care.
if not data.flags['C_CONTIGUOUS']:
# this always makes a copy
data_send = numpy.ascontiguousarray(data)
# this is a copy, we can make sure it is native
# and modify in place if needed
array_to_native(data_send, inplace=True)
else:
# we can avoid the copy with a try-finally block and
# some logic
data_send = array_to_native(data, inplace=False)
if IS_PY3 and data_send.dtype.char == 'U':
# for python3, we convert unicode to ascii
# this will error if the character is not in ascii
data_send = data_send.astype('S', copy=False)
self._verify_column_data(colnum, data_send)
self._FITS.write_column(
self._ext+1, colnum+1, data_send,
firstrow=firstrow+1, write_bitcols=self.write_bitcols)
del data_send
self._update_info() | [
"def",
"write_column",
"(",
"self",
",",
"column",
",",
"data",
",",
"*",
"*",
"keys",
")",
":",
"firstrow",
"=",
"keys",
".",
"get",
"(",
"'firstrow'",
",",
"0",
")",
"colnum",
"=",
"self",
".",
"_extract_colnum",
"(",
"column",
")",
"# need it to be contiguous and native byte order. For now, make a",
"# copy. but we may be able to avoid this with some care.",
"if",
"not",
"data",
".",
"flags",
"[",
"'C_CONTIGUOUS'",
"]",
":",
"# this always makes a copy",
"data_send",
"=",
"numpy",
".",
"ascontiguousarray",
"(",
"data",
")",
"# this is a copy, we can make sure it is native",
"# and modify in place if needed",
"array_to_native",
"(",
"data_send",
",",
"inplace",
"=",
"True",
")",
"else",
":",
"# we can avoid the copy with a try-finally block and",
"# some logic",
"data_send",
"=",
"array_to_native",
"(",
"data",
",",
"inplace",
"=",
"False",
")",
"if",
"IS_PY3",
"and",
"data_send",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# for python3, we convert unicode to ascii",
"# this will error if the character is not in ascii",
"data_send",
"=",
"data_send",
".",
"astype",
"(",
"'S'",
",",
"copy",
"=",
"False",
")",
"self",
".",
"_verify_column_data",
"(",
"colnum",
",",
"data_send",
")",
"self",
".",
"_FITS",
".",
"write_column",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnum",
"+",
"1",
",",
"data_send",
",",
"firstrow",
"=",
"firstrow",
"+",
"1",
",",
"write_bitcols",
"=",
"self",
".",
"write_bitcols",
")",
"del",
"data_send",
"self",
".",
"_update_info",
"(",
")"
] | Write data to a column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This should match the
shape of the column. You are probably better off using
fits.write_table() to be sure.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0. | [
"Write",
"data",
"to",
"a",
"column",
"in",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L242-L290 | train | 234,981 |
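A usage sketch, assuming fits is an open fitsio.FITS object whose extension 1 is a table with an 'x' column of matching dtype and enough rows (names are hypothetical):

import numpy

col = numpy.arange(10, dtype='f8')
fits[1].write_column('x', col)               # overwrite starting at row 0
fits[1].write_column('x', col, firstrow=10)  # continue at row 10; see append() for growing the table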
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._verify_column_data | def _verify_column_data(self, colnum, data):
"""
verify the input data is of the correct type and shape
"""
this_dt = data.dtype.descr[0]
if len(data.shape) > 2:
this_shape = data.shape[1:]
elif len(data.shape) == 2 and data.shape[1] > 1:
this_shape = data.shape[1:]
else:
this_shape = ()
this_npy_type = this_dt[1][1:]
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
if npy_type[0] in ['>', '<', '|']:
npy_type = npy_type[1:]
col_name = info['name']
col_tdim = info['tdim']
col_shape = _tdim2shape(
col_tdim, col_name, is_string=(npy_type[0] == 'S'))
if col_shape is None:
if this_shape == ():
this_shape = None
if col_shape is not None and not isinstance(col_shape, tuple):
col_shape = (col_shape,)
"""
print('column name:',col_name)
print(data.shape)
print('col tdim', info['tdim'])
print('column dtype:',npy_type)
print('input dtype:',this_npy_type)
print('column shape:',col_shape)
print('input shape:',this_shape)
print()
"""
# this mismatch is OK
if npy_type == 'i1' and this_npy_type == 'b1':
this_npy_type = 'i1'
if isinstance(self, AsciiTableHDU):
# we don't enforce types exact for ascii
if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
this_npy_type = 'i8'
elif npy_type == 'f8' and this_npy_type == 'f4':
this_npy_type = 'f8'
if this_npy_type != npy_type:
raise ValueError(
"bad input data for column '%s': "
"expected '%s', got '%s'" % (
col_name, npy_type, this_npy_type))
if this_shape != col_shape:
raise ValueError(
"bad input shape for column '%s': "
"expected '%s', got '%s'" % (col_name, col_shape, this_shape)) | python | def _verify_column_data(self, colnum, data):
"""
verify the input data is of the correct type and shape
"""
this_dt = data.dtype.descr[0]
if len(data.shape) > 2:
this_shape = data.shape[1:]
elif len(data.shape) == 2 and data.shape[1] > 1:
this_shape = data.shape[1:]
else:
this_shape = ()
this_npy_type = this_dt[1][1:]
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
if npy_type[0] in ['>', '<', '|']:
npy_type = npy_type[1:]
col_name = info['name']
col_tdim = info['tdim']
col_shape = _tdim2shape(
col_tdim, col_name, is_string=(npy_type[0] == 'S'))
if col_shape is None:
if this_shape == ():
this_shape = None
if col_shape is not None and not isinstance(col_shape, tuple):
col_shape = (col_shape,)
"""
print('column name:',col_name)
print(data.shape)
print('col tdim', info['tdim'])
print('column dtype:',npy_type)
print('input dtype:',this_npy_type)
print('column shape:',col_shape)
print('input shape:',this_shape)
print()
"""
# this mismatch is OK
if npy_type == 'i1' and this_npy_type == 'b1':
this_npy_type = 'i1'
if isinstance(self, AsciiTableHDU):
# we don't enforce types exact for ascii
if npy_type == 'i8' and this_npy_type in ['i2', 'i4']:
this_npy_type = 'i8'
elif npy_type == 'f8' and this_npy_type == 'f4':
this_npy_type = 'f8'
if this_npy_type != npy_type:
raise ValueError(
"bad input data for column '%s': "
"expected '%s', got '%s'" % (
col_name, npy_type, this_npy_type))
if this_shape != col_shape:
raise ValueError(
"bad input shape for column '%s': "
"expected '%s', got '%s'" % (col_name, col_shape, this_shape)) | [
"def",
"_verify_column_data",
"(",
"self",
",",
"colnum",
",",
"data",
")",
":",
"this_dt",
"=",
"data",
".",
"dtype",
".",
"descr",
"[",
"0",
"]",
"if",
"len",
"(",
"data",
".",
"shape",
")",
">",
"2",
":",
"this_shape",
"=",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"elif",
"len",
"(",
"data",
".",
"shape",
")",
"==",
"2",
"and",
"data",
".",
"shape",
"[",
"1",
"]",
">",
"1",
":",
"this_shape",
"=",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"else",
":",
"this_shape",
"=",
"(",
")",
"this_npy_type",
"=",
"this_dt",
"[",
"1",
"]",
"[",
"1",
":",
"]",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"info",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"if",
"npy_type",
"[",
"0",
"]",
"in",
"[",
"'>'",
",",
"'<'",
",",
"'|'",
"]",
":",
"npy_type",
"=",
"npy_type",
"[",
"1",
":",
"]",
"col_name",
"=",
"info",
"[",
"'name'",
"]",
"col_tdim",
"=",
"info",
"[",
"'tdim'",
"]",
"col_shape",
"=",
"_tdim2shape",
"(",
"col_tdim",
",",
"col_name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
")",
")",
"if",
"col_shape",
"is",
"None",
":",
"if",
"this_shape",
"==",
"(",
")",
":",
"this_shape",
"=",
"None",
"if",
"col_shape",
"is",
"not",
"None",
"and",
"not",
"isinstance",
"(",
"col_shape",
",",
"tuple",
")",
":",
"col_shape",
"=",
"(",
"col_shape",
",",
")",
"\"\"\"\n print('column name:',col_name)\n print(data.shape)\n print('col tdim', info['tdim'])\n print('column dtype:',npy_type)\n print('input dtype:',this_npy_type)\n print('column shape:',col_shape)\n print('input shape:',this_shape)\n print()\n \"\"\"",
"# this mismatch is OK",
"if",
"npy_type",
"==",
"'i1'",
"and",
"this_npy_type",
"==",
"'b1'",
":",
"this_npy_type",
"=",
"'i1'",
"if",
"isinstance",
"(",
"self",
",",
"AsciiTableHDU",
")",
":",
"# we don't enforce types exact for ascii",
"if",
"npy_type",
"==",
"'i8'",
"and",
"this_npy_type",
"in",
"[",
"'i2'",
",",
"'i4'",
"]",
":",
"this_npy_type",
"=",
"'i8'",
"elif",
"npy_type",
"==",
"'f8'",
"and",
"this_npy_type",
"==",
"'f4'",
":",
"this_npy_type",
"=",
"'f8'",
"if",
"this_npy_type",
"!=",
"npy_type",
":",
"raise",
"ValueError",
"(",
"\"bad input data for column '%s': \"",
"\"expected '%s', got '%s'\"",
"%",
"(",
"col_name",
",",
"npy_type",
",",
"this_npy_type",
")",
")",
"if",
"this_shape",
"!=",
"col_shape",
":",
"raise",
"ValueError",
"(",
"\"bad input shape for column '%s': \"",
"\"expected '%s', got '%s'\"",
"%",
"(",
"col_name",
",",
"col_shape",
",",
"this_shape",
")",
")"
] | verify the input data is of the correct type and shape | [
"verify",
"the",
"input",
"data",
"is",
"of",
"the",
"correct",
"type",
"and",
"shape"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L292-L356 | train | 234,982 |
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.write_var_column | def write_var_column(self, column, data, firstrow=0, **keys):
"""
Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
if not is_object(data):
raise ValueError("Only object fields can be written to "
"variable-length arrays")
colnum = self._extract_colnum(column)
self._FITS.write_var_column(self._ext+1, colnum+1, data,
firstrow=firstrow+1)
self._update_info() | python | def write_var_column(self, column, data, firstrow=0, **keys):
"""
Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0.
"""
if not is_object(data):
raise ValueError("Only object fields can be written to "
"variable-length arrays")
colnum = self._extract_colnum(column)
self._FITS.write_var_column(self._ext+1, colnum+1, data,
firstrow=firstrow+1)
self._update_info() | [
"def",
"write_var_column",
"(",
"self",
",",
"column",
",",
"data",
",",
"firstrow",
"=",
"0",
",",
"*",
"*",
"keys",
")",
":",
"if",
"not",
"is_object",
"(",
"data",
")",
":",
"raise",
"ValueError",
"(",
"\"Only object fields can be written to \"",
"\"variable-length arrays\"",
")",
"colnum",
"=",
"self",
".",
"_extract_colnum",
"(",
"column",
")",
"self",
".",
"_FITS",
".",
"write_var_column",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnum",
"+",
"1",
",",
"data",
",",
"firstrow",
"=",
"firstrow",
"+",
"1",
")",
"self",
".",
"_update_info",
"(",
")"
] | Write data to a variable-length column in this HDU
This HDU must be a table HDU.
parameters
----------
column: scalar string/integer
The column in which to write. Can be the name or number (0 offset)
data: ndarray
Numerical python array to write. This must be an object array.
firstrow: integer, optional
At which row you should begin writing. Be sure you know what you
are doing! For appending see the append() method. Default 0. | [
"Write",
"data",
"to",
"a",
"variable",
"-",
"length",
"column",
"in",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L358-L382 | train | 234,983 |
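Variable-length columns take object arrays whose elements are per-row arrays -- ragged lengths are the point, and the is_object() guard above enforces the container type. A sketch with a hypothetical column name, assuming hdu is a table HDU with a var-length 'vcol':

import numpy

ragged = numpy.empty(3, dtype=object)
ragged[0] = numpy.array([1.0])
ragged[1] = numpy.array([1.0, 2.0])
ragged[2] = numpy.array([1.0, 2.0, 3.0])
hdu.write_var_column('vcol', ragged)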
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.insert_column | def insert_column(self, name, data, colnum=None):
"""
Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ascii tables as well.
"""
if name in self._colnames:
raise ValueError("column '%s' already exists" % name)
if IS_PY3 and data.dtype.char == 'U':
# fast dtype conversion using an empty array
# we could hack at the actual text description, but using
# the numpy API is probably safer
# this also avoids doing a dtype conversion on every array
# element which could be expensive
descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
else:
descr = data.dtype.descr
if len(descr) > 1:
raise ValueError("you can only insert a single column, "
"requested: %s" % descr)
this_descr = descr[0]
this_descr = [name, this_descr[1]]
if len(data.shape) > 1:
this_descr += [data.shape[1:]]
this_descr = tuple(this_descr)
name, fmt, dims = _npy2fits(
this_descr,
table_type=self._table_type_str)
if dims is not None:
dims = [dims]
if colnum is None:
new_colnum = len(self._info['colinfo']) + 1
else:
new_colnum = colnum+1
self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
self._update_info()
self.write_column(name, data) | python | def insert_column(self, name, data, colnum=None):
"""
Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ascii tables as well.
"""
if name in self._colnames:
raise ValueError("column '%s' already exists" % name)
if IS_PY3 and data.dtype.char == 'U':
# fast dtype conversion using an empty array
# we could hack at the actual text description, but using
# the numpy API is probably safer
# this also avoids doing a dtype conversion on every array
# element which could be expensive
descr = numpy.empty(1).astype(data.dtype).astype('S').dtype.descr
else:
descr = data.dtype.descr
if len(descr) > 1:
raise ValueError("you can only insert a single column, "
"requested: %s" % descr)
this_descr = descr[0]
this_descr = [name, this_descr[1]]
if len(data.shape) > 1:
this_descr += [data.shape[1:]]
this_descr = tuple(this_descr)
name, fmt, dims = _npy2fits(
this_descr,
table_type=self._table_type_str)
if dims is not None:
dims = [dims]
if colnum is None:
new_colnum = len(self._info['colinfo']) + 1
else:
new_colnum = colnum+1
self._FITS.insert_col(self._ext+1, new_colnum, name, fmt, tdim=dims)
self._update_info()
self.write_column(name, data) | [
"def",
"insert_column",
"(",
"self",
",",
"name",
",",
"data",
",",
"colnum",
"=",
"None",
")",
":",
"if",
"name",
"in",
"self",
".",
"_colnames",
":",
"raise",
"ValueError",
"(",
"\"column '%s' already exists\"",
"%",
"name",
")",
"if",
"IS_PY3",
"and",
"data",
".",
"dtype",
".",
"char",
"==",
"'U'",
":",
"# fast dtype conversion using an empty array",
"# we could hack at the actual text description, but using",
"# the numpy API is probably safer",
"# this also avoids doing a dtype conversion on every array",
"# element which could b expensive",
"descr",
"=",
"numpy",
".",
"empty",
"(",
"1",
")",
".",
"astype",
"(",
"data",
".",
"dtype",
")",
".",
"astype",
"(",
"'S'",
")",
".",
"dtype",
".",
"descr",
"else",
":",
"descr",
"=",
"data",
".",
"dtype",
".",
"descr",
"if",
"len",
"(",
"descr",
")",
">",
"1",
":",
"raise",
"ValueError",
"(",
"\"you can only insert a single column, \"",
"\"requested: %s\"",
"%",
"descr",
")",
"this_descr",
"=",
"descr",
"[",
"0",
"]",
"this_descr",
"=",
"[",
"name",
",",
"this_descr",
"[",
"1",
"]",
"]",
"if",
"len",
"(",
"data",
".",
"shape",
")",
">",
"1",
":",
"this_descr",
"+=",
"[",
"data",
".",
"shape",
"[",
"1",
":",
"]",
"]",
"this_descr",
"=",
"tuple",
"(",
"this_descr",
")",
"name",
",",
"fmt",
",",
"dims",
"=",
"_npy2fits",
"(",
"this_descr",
",",
"table_type",
"=",
"self",
".",
"_table_type_str",
")",
"if",
"dims",
"is",
"not",
"None",
":",
"dims",
"=",
"[",
"dims",
"]",
"if",
"colnum",
"is",
"None",
":",
"new_colnum",
"=",
"len",
"(",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
")",
"+",
"1",
"else",
":",
"new_colnum",
"=",
"colnum",
"+",
"1",
"self",
".",
"_FITS",
".",
"insert_col",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"new_colnum",
",",
"name",
",",
"fmt",
",",
"tdim",
"=",
"dims",
")",
"self",
".",
"_update_info",
"(",
")",
"self",
".",
"write_column",
"(",
"name",
",",
"data",
")"
] | Insert a new column.
parameters
----------
name: string
The column name
data:
The data to write into the new column.
colnum: int, optional
The column number for the new column, zero-offset. Default
is to add the new column after the existing ones.
Notes
-----
This method is used unmodified by ASCII tables as well. | [
"Insert",
"a",
"new",
"column",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L384-L439 | train | 234,984 |
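A minimal usage sketch for insert_column; the filename and names are hypothetical:

import numpy
import fitsio

weights = numpy.ones(100, dtype='f8')

with fitsio.FITS('data.fits', 'rw') as fits:
    # with no colnum= the new column lands after the existing ones;
    # pass colnum= (zero-offset) to choose the position
    fits['mytable'].insert_column('weight', weights)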
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.append | def append(self, data, **keys):
"""
Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the corresponding
list of column names or column numbers
"""
firstrow = self._info['nrows']
keys['firstrow'] = firstrow
self.write(data, **keys) | python | def append(self, data, **keys):
"""
Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the corresponding
list of column names or column numbers
"""
firstrow = self._info['nrows']
keys['firstrow'] = firstrow
self.write(data, **keys) | [
"def",
"append",
"(",
"self",
",",
"data",
",",
"*",
"*",
"keys",
")",
":",
"firstrow",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"keys",
"[",
"'firstrow'",
"]",
"=",
"firstrow",
"self",
".",
"write",
"(",
"data",
",",
"*",
"*",
"keys",
")"
] | Append new rows to a table HDU
parameters
----------
data: ndarray or list of arrays
A numerical python array with fields (recarray) or a list of
arrays. Should have the same fields as the existing table. If only
a subset of the table columns are present, the other columns are
filled with zeros.
columns: list, optional
if a list of arrays is sent, also send the corresponding
list of column names or column numbers | [
"Append",
"new",
"rows",
"to",
"a",
"table",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L441-L462 | train | 234,985 |
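A minimal usage sketch for append; the filename and dtype are hypothetical:

import numpy
import fitsio

new_rows = numpy.zeros(10, dtype=[('x', 'f8'), ('flux', 'f4')])

with fitsio.FITS('data.fits', 'rw') as fits:
    # equivalent to write() with firstrow set to the current row count
    fits['mytable'].append(new_rows)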
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.delete_rows | def delete_rows(self, rows):
"""
Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete)
"""
if rows is None:
return
# extract and convert to 1-offset for C routine
if isinstance(rows, slice):
rows = self._process_slice(rows)
if rows.step is not None and rows.step != 1:
rows = numpy.arange(
rows.start+1,
rows.stop+1,
rows.step,
)
else:
# rows must be 1-offset
rows = slice(rows.start+1, rows.stop+1)
else:
rows = self._extract_rows(rows)
# rows must be 1-offset
rows += 1
if isinstance(rows, slice):
self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
else:
if rows.size == 0:
return
self._FITS.delete_rows(self._ext+1, rows)
self._update_info() | python | def delete_rows(self, rows):
"""
Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete)
"""
if rows is None:
return
# extract and convert to 1-offset for C routine
if isinstance(rows, slice):
rows = self._process_slice(rows)
if rows.step is not None and rows.step != 1:
rows = numpy.arange(
rows.start+1,
rows.stop+1,
rows.step,
)
else:
# rows must be 1-offset
rows = slice(rows.start+1, rows.stop+1)
else:
rows = self._extract_rows(rows)
# rows must be 1-offset
rows += 1
if isinstance(rows, slice):
self._FITS.delete_row_range(self._ext+1, rows.start, rows.stop)
else:
if rows.size == 0:
return
self._FITS.delete_rows(self._ext+1, rows)
self._update_info() | [
"def",
"delete_rows",
"(",
"self",
",",
"rows",
")",
":",
"if",
"rows",
"is",
"None",
":",
"return",
"# extract and convert to 1-offset for C routine",
"if",
"isinstance",
"(",
"rows",
",",
"slice",
")",
":",
"rows",
"=",
"self",
".",
"_process_slice",
"(",
"rows",
")",
"if",
"rows",
".",
"step",
"is",
"not",
"None",
"and",
"rows",
".",
"step",
"!=",
"1",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"rows",
".",
"start",
"+",
"1",
",",
"rows",
".",
"stop",
"+",
"1",
",",
"rows",
".",
"step",
",",
")",
"else",
":",
"# rows must be 1-offset",
"rows",
"=",
"slice",
"(",
"rows",
".",
"start",
"+",
"1",
",",
"rows",
".",
"stop",
"+",
"1",
")",
"else",
":",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"# rows must be 1-offset",
"rows",
"+=",
"1",
"if",
"isinstance",
"(",
"rows",
",",
"slice",
")",
":",
"self",
".",
"_FITS",
".",
"delete_row_range",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"rows",
".",
"start",
",",
"rows",
".",
"stop",
")",
"else",
":",
"if",
"rows",
".",
"size",
"==",
"0",
":",
"return",
"self",
".",
"_FITS",
".",
"delete_rows",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"rows",
")",
"self",
".",
"_update_info",
"(",
")"
] | Delete rows from the table
parameters
----------
rows: sequence or slice
The exact rows to delete as a sequence, or a slice.
examples
--------
# delete a range of rows
with fitsio.FITS(fname,'rw') as fits:
fits['mytable'].delete_rows(slice(3,20))
# delete specific rows
with fitsio.FITS(fname,'rw') as fits:
rows2delete = [3,88,76]
fits['mytable'].delete_rows(rows2delete) | [
"Delete",
"rows",
"from",
"the",
"table"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L464-L513 | train | 234,986 |
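The docstring examples above cover contiguous slices and explicit row lists; a stepped slice also works, since the method expands it internally to explicit 1-offset row numbers for the C routine. The filename is hypothetical:

import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:
    # delete every other row among the first 100
    fits['mytable'].delete_rows(slice(0, 100, 2))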
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.resize | def resize(self, nrows, front=False):
"""
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128,32768,2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False
"""
nrows_current = self.get_nrows()
if nrows == nrows_current:
return
if nrows < nrows_current:
rowdiff = nrows_current - nrows
if front:
# delete from the front
start = 0
stop = rowdiff
else:
# delete from the back
start = nrows
stop = nrows_current
self.delete_rows(slice(start, stop))
else:
rowdiff = nrows - nrows_current
if front:
# in this case zero is what we want, since the code inserts
firstrow = 0
else:
firstrow = nrows_current
self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
self._update_info() | python | def resize(self, nrows, front=False):
"""
Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128,32768,2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False
"""
nrows_current = self.get_nrows()
if nrows == nrows_current:
return
if nrows < nrows_current:
rowdiff = nrows_current - nrows
if front:
# delete from the front
start = 0
stop = rowdiff
else:
# delete from the back
start = nrows
stop = nrows_current
self.delete_rows(slice(start, stop))
else:
rowdiff = nrows - nrows_current
if front:
# in this case zero is what we want, since the code inserts
firstrow = 0
else:
firstrow = nrows_current
self._FITS.insert_rows(self._ext+1, firstrow, rowdiff)
self._update_info() | [
"def",
"resize",
"(",
"self",
",",
"nrows",
",",
"front",
"=",
"False",
")",
":",
"nrows_current",
"=",
"self",
".",
"get_nrows",
"(",
")",
"if",
"nrows",
"==",
"nrows_current",
":",
"return",
"if",
"nrows",
"<",
"nrows_current",
":",
"rowdiff",
"=",
"nrows_current",
"-",
"nrows",
"if",
"front",
":",
"# delete from the front",
"start",
"=",
"0",
"stop",
"=",
"rowdiff",
"else",
":",
"# delete from the back",
"start",
"=",
"nrows",
"stop",
"=",
"nrows_current",
"self",
".",
"delete_rows",
"(",
"slice",
"(",
"start",
",",
"stop",
")",
")",
"else",
":",
"rowdiff",
"=",
"nrows",
"-",
"nrows_current",
"if",
"front",
":",
"# in this case zero is what we want, since the code inserts",
"firstrow",
"=",
"0",
"else",
":",
"firstrow",
"=",
"nrows_current",
"self",
".",
"_FITS",
".",
"insert_rows",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"firstrow",
",",
"rowdiff",
")",
"self",
".",
"_update_info",
"(",
")"
] | Resize the table to the given size, removing or adding rows as
necessary. Note if expanding the table at the end, it is more
efficient to use the append function than resizing and then
writing.
Newly added rows are zeroed, except for 'i1', 'u2' and 'u4' data types
which get -128,32768,2147483648 respectively
parameters
----------
nrows: int
new size of table
front: bool, optional
If True, add or remove rows from the front. Default
is False | [
"Resize",
"the",
"table",
"to",
"the",
"given",
"size",
"removing",
"or",
"adding",
"rows",
"as",
"necessary",
".",
"Note",
"if",
"expanding",
"the",
"table",
"at",
"the",
"end",
"it",
"is",
"more",
"efficient",
"to",
"use",
"the",
"append",
"function",
"than",
"resizing",
"and",
"then",
"writing",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L515-L560 | train | 234,987 |
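A minimal usage sketch for resize; the filename is hypothetical:

import fitsio

with fitsio.FITS('data.fits', 'rw') as fits:
    hdu = fits['mytable']
    hdu.resize(1000)             # grow at the end; new rows are zeroed
    hdu.resize(500, front=True)  # shrink by deleting from the front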
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read | def read(self, **keys):
"""
read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
columns = keys.get('columns', None)
rows = keys.get('rows', None)
if columns is not None:
if 'columns' in keys:
del keys['columns']
data = self.read_columns(columns, **keys)
elif rows is not None:
if 'rows' in keys:
del keys['rows']
data = self.read_rows(rows, **keys)
else:
data = self._read_all(**keys)
return data | python | def read(self, **keys):
"""
read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
columns = keys.get('columns', None)
rows = keys.get('rows', None)
if columns is not None:
if 'columns' in keys:
del keys['columns']
data = self.read_columns(columns, **keys)
elif rows is not None:
if 'rows' in keys:
del keys['rows']
data = self.read_rows(rows, **keys)
else:
data = self._read_all(**keys)
return data | [
"def",
"read",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"columns",
"=",
"keys",
".",
"get",
"(",
"'columns'",
",",
"None",
")",
"rows",
"=",
"keys",
".",
"get",
"(",
"'rows'",
",",
"None",
")",
"if",
"columns",
"is",
"not",
"None",
":",
"if",
"'columns'",
"in",
"keys",
":",
"del",
"keys",
"[",
"'columns'",
"]",
"data",
"=",
"self",
".",
"read_columns",
"(",
"columns",
",",
"*",
"*",
"keys",
")",
"elif",
"rows",
"is",
"not",
"None",
":",
"if",
"'rows'",
"in",
"keys",
":",
"del",
"keys",
"[",
"'rows'",
"]",
"data",
"=",
"self",
".",
"read_rows",
"(",
"rows",
",",
"*",
"*",
"keys",
")",
"else",
":",
"data",
"=",
"self",
".",
"_read_all",
"(",
"*",
"*",
"keys",
")",
"return",
"data"
] | read data from this HDU
By default, all data are read.
send columns= and rows= to select subsets of the data.
Table data are read into a recarray; use read_column() to get a single
column as an ordinary array. You can alternatively use slice notation
fits=fitsio.FITS(filename)
fits[ext][:]
fits[ext][2:5]
fits[ext][200:235:2]
fits[ext][rows]
fits[ext][cols][rows]
parameters
----------
columns: optional
An optional set of columns to read from table HDUs. Default is to
read all. Can be string or number. If a sequence, a recarray
is always returned. If a scalar, an ordinary array is returned.
rows: optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details. | [
"read",
"data",
"from",
"this",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L562-L606 | train | 234,988 |
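A minimal usage sketch for read; the filename and column names are hypothetical:

import fitsio

with fitsio.FITS('data.fits') as fits:
    hdu = fits[1]
    everything = hdu.read()
    # a column list plus a row list dispatches to read_columns()
    subset = hdu.read(columns=['x', 'flux'], rows=[0, 5, 9])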
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._read_all | def _read_all(self, **keys):
"""
Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
has_tbit = self._check_tbit()
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
rows = None
array = self._read_rec_with_var(colnums, rows, dtype,
offsets, isvar, vstorage)
elif has_tbit:
# drop down to read_columns since we can't stuff into a
# contiguous array
colnums = self._extract_colnums()
array = self.read_columns(colnums, **keys)
else:
firstrow = 1 # noqa - not used?
nrows = self._info['nrows']
array = numpy.zeros(nrows, dtype=dtype)
self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def _read_all(self, **keys):
"""
Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
has_tbit = self._check_tbit()
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
rows = None
array = self._read_rec_with_var(colnums, rows, dtype,
offsets, isvar, vstorage)
elif has_tbit:
# drop down to read_columns since we can't stuff into a
# contiguous array
colnums = self._extract_colnums()
array = self.read_columns(colnums, **keys)
else:
firstrow = 1 # noqa - not used?
nrows = self._info['nrows']
array = numpy.zeros(nrows, dtype=dtype)
self._FITS.read_as_rec(self._ext+1, 1, nrows, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"_read_all",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"has_tbit",
"=",
"self",
".",
"_check_tbit",
"(",
")",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"rows",
"=",
"None",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"elif",
"has_tbit",
":",
"# drop down to read_columns since we can't stuff into a",
"# contiguous array",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"array",
"=",
"self",
".",
"read_columns",
"(",
"colnums",
",",
"*",
"*",
"keys",
")",
"else",
":",
"firstrow",
"=",
"1",
"# noqa - not used?",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_FITS",
".",
"read_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"1",
",",
"nrows",
",",
"array",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read all data in the HDU.
parameters
----------
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction. | [
"Read",
"all",
"data",
"in",
"the",
"HDU",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L608-L665 | train | 234,989 |
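The per-field rescaling applied above follows the usual FITS scaling convention, physical = TZERO + TSCALE * stored. A standalone numpy illustration with made-up header values:

import numpy

stored = numpy.array([0, 100, 200], dtype='i2')
tscale, tzero = 0.5, 32768.0  # hypothetical TSCALn/TZEROn values
physical = tzero + tscale * stored.astype('f8')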
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_column | def read_column(self, col, **keys):
"""
Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
res = self.read_columns([col], **keys)
colname = res.dtype.names[0]
data = res[colname]
self._maybe_trim_strings(data, **keys)
return data | python | def read_column(self, col, **keys):
"""
Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
"""
res = self.read_columns([col], **keys)
colname = res.dtype.names[0]
data = res[colname]
self._maybe_trim_strings(data, **keys)
return data | [
"def",
"read_column",
"(",
"self",
",",
"col",
",",
"*",
"*",
"keys",
")",
":",
"res",
"=",
"self",
".",
"read_columns",
"(",
"[",
"col",
"]",
",",
"*",
"*",
"keys",
")",
"colname",
"=",
"res",
".",
"dtype",
".",
"names",
"[",
"0",
"]",
"data",
"=",
"res",
"[",
"colname",
"]",
"self",
".",
"_maybe_trim_strings",
"(",
"data",
",",
"*",
"*",
"keys",
")",
"return",
"data"
] | Read the specified column
Alternatively, you can use slice notation
fits=fitsio.FITS(filename)
fits[ext][colname][:]
fits[ext][colname][2:5]
fits[ext][colname][200:235:2]
fits[ext][colname][rows]
Note, if reading multiple columns, it is more efficient to use
read(columns=) or slice notation with a list of column names.
parameters
----------
col: string/int, required
The column name or number.
rows: optional
An optional set of row numbers to read.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details. | [
"Read",
"the",
"specified",
"column"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L667-L697 | train | 234,990 |
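A minimal usage sketch for read_column; the filename and column name are hypothetical:

import fitsio

with fitsio.FITS('data.fits') as fits:
    # a single column comes back as an ordinary array, not a recarray
    x = fits[1].read_column('x', rows=[0, 1, 2])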
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_rows | def read_rows(self, rows, **keys):
"""
Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if rows is None:
# we actually want all rows!
return self._read_all()
if self._info['hdutype'] == ASCII_TBL:
keys['rows'] = rows
return self.read(**keys)
rows = self._extract_rows(rows)
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
return self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
array = numpy.zeros(rows.size, dtype=dtype)
self._FITS.read_rows_as_rec(self._ext+1, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_rows(self, rows, **keys):
"""
Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if rows is None:
# we actually want all rows!
return self._read_all()
if self._info['hdutype'] == ASCII_TBL:
keys['rows'] = rows
return self.read(**keys)
rows = self._extract_rows(rows)
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
colnums = self._extract_colnums()
return self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
array = numpy.zeros(rows.size, dtype=dtype)
self._FITS.read_rows_as_rec(self._ext+1, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_rows",
"(",
"self",
",",
"rows",
",",
"*",
"*",
"keys",
")",
":",
"if",
"rows",
"is",
"None",
":",
"# we actually want all rows!",
"return",
"self",
".",
"_read_all",
"(",
")",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"keys",
"[",
"'rows'",
"]",
"=",
"rows",
"return",
"self",
".",
"read",
"(",
"*",
"*",
"keys",
")",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"return",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"rows",
".",
"size",
",",
"dtype",
"=",
"dtype",
")",
"self",
".",
"_FITS",
".",
"read_rows_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"array",
",",
"rows",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read the specified rows.
parameters
----------
rows: list,array
A list or array of row indices.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction. | [
"Read",
"the",
"specified",
"rows",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L699-L756 | train | 234,991 |
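A minimal usage sketch for read_rows; the filename is hypothetical:

import numpy
import fitsio

with fitsio.FITS('data.fits') as fits:
    rows = numpy.array([2, 7, 11])
    # lower=True forces lower-case field names in the result
    data = fits[1].read_rows(rows, lower=True)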
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_columns | def read_columns(self, columns, **keys):
"""
read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
keys['columns'] = columns
return self.read(**keys)
rows = keys.get('rows', None)
# if columns is None, returns all. Guaranteed to be unique and sorted
colnums = self._extract_colnums(columns)
if isinstance(colnums, int):
# scalar sent, don't read as a recarray
return self.read_column(columns, **keys)
# if rows is None still returns None, and is correctly interpreted
# by the reader to mean all
rows = self._extract_rows(rows)
# this is the full dtype for all columns
dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
colnumsp = colnums[:].copy()
colnumsp[:] += 1
self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for i in xrange(colnums.size):
colnum = int(colnums[i])
name = array.dtype.names[i]
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
if (self._check_tbit(colnums=colnums)):
array = self._fix_tbit_dtype(array, colnums)
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_columns(self, columns, **keys):
"""
read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
keys['columns'] = columns
return self.read(**keys)
rows = keys.get('rows', None)
# if columns is None, returns all. Guaranteed to be unique and sorted
colnums = self._extract_colnums(columns)
if isinstance(colnums, int):
# scalar sent, don't read as a recarray
return self.read_column(columns, **keys)
# if rows is None still returns None, and is correctly interpreted
# by the reader to mean all
rows = self._extract_rows(rows)
# this is the full dtype for all columns
dtype, offsets, isvar = self.get_rec_dtype(colnums=colnums, **keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
colnumsp = colnums[:].copy()
colnumsp[:] += 1
self._FITS.read_columns_as_rec(self._ext+1, colnumsp, array, rows)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
for i in xrange(colnums.size):
colnum = int(colnums[i])
name = array.dtype.names[i]
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
if (self._check_tbit(colnums=colnums)):
array = self._fix_tbit_dtype(array, colnums)
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_columns",
"(",
"self",
",",
"columns",
",",
"*",
"*",
"keys",
")",
":",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"keys",
"[",
"'columns'",
"]",
"=",
"columns",
"return",
"self",
".",
"read",
"(",
"*",
"*",
"keys",
")",
"rows",
"=",
"keys",
".",
"get",
"(",
"'rows'",
",",
"None",
")",
"# if columns is None, returns all. Guaranteed to be unique and sorted",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
"columns",
")",
"if",
"isinstance",
"(",
"colnums",
",",
"int",
")",
":",
"# scalar sent, don't read as a recarray",
"return",
"self",
".",
"read_column",
"(",
"columns",
",",
"*",
"*",
"keys",
")",
"# if rows is None still returns None, and is correctly interpreted",
"# by the reader to mean all",
"rows",
"=",
"self",
".",
"_extract_rows",
"(",
"rows",
")",
"# this is the full dtype for all columns",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"colnums",
"=",
"colnums",
",",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"if",
"rows",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"else",
":",
"nrows",
"=",
"rows",
".",
"size",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"colnumsp",
"=",
"colnums",
"[",
":",
"]",
".",
"copy",
"(",
")",
"colnumsp",
"[",
":",
"]",
"+=",
"1",
"self",
".",
"_FITS",
".",
"read_columns_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnumsp",
",",
"array",
",",
"rows",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"i",
"in",
"xrange",
"(",
"colnums",
".",
"size",
")",
":",
"colnum",
"=",
"int",
"(",
"colnums",
"[",
"i",
"]",
")",
"name",
"=",
"array",
".",
"dtype",
".",
"names",
"[",
"i",
"]",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"if",
"(",
"self",
".",
"_check_tbit",
"(",
"colnums",
"=",
"colnums",
")",
")",
":",
"array",
"=",
"self",
".",
"_fix_tbit_dtype",
"(",
"array",
",",
"colnums",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | read a subset of columns from this binary table HDU
By default, all rows are read. Send rows= to select subsets of the
data. Table data are read into a recarray for multiple columns,
plain array for a single column.
parameters
----------
columns: list/array
An optional set of columns to read from table HDUs. Can be string
or number. If a sequence, a recarray is always returned. If a
scalar, an ordinary array is returned.
rows: list/array, optional
An optional list of rows to read from table HDUs. Default is to
read all.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction. | [
"read",
"a",
"subset",
"of",
"columns",
"from",
"this",
"binary",
"table",
"HDU"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L758-L845 | train | 234,992 |
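A minimal usage sketch for read_columns; the filename and names are hypothetical:

import fitsio

with fitsio.FITS('data.fits') as fits:
    # a sequence of columns always yields a recarray, even if it has
    # one element; a scalar column is routed through read_column()
    rec = fits[1].read_columns(['x', 'flux'], rows=[0, 2, 4])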
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.read_slice | def read_slice(self, firstrow, lastrow, step=1, **keys):
"""
Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
keys['rows'] = rows
return self.read_ascii(**keys)
step = keys.get('step', 1)
if self._info['hdutype'] == IMAGE_HDU:
raise ValueError("slices currently only supported for tables")
maxrow = self._info['nrows']
if firstrow < 0 or lastrow > maxrow:
raise ValueError(
"slice must specify a sub-range of [%d,%d]" % (0, maxrow))
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
colnums = self._extract_colnums()
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if step != 1:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
array = self.read(rows=rows)
else:
# no +1 because lastrow is non-inclusive
nrows = lastrow - firstrow
array = numpy.zeros(nrows, dtype=dtype)
# only first needs to be +1. This is because the C code is
# inclusive
self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | python | def read_slice(self, firstrow, lastrow, step=1, **keys):
"""
Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction.
"""
if self._info['hdutype'] == ASCII_TBL:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
keys['rows'] = rows
return self.read_ascii(**keys)
step = keys.get('step', 1)
if self._info['hdutype'] == IMAGE_HDU:
raise ValueError("slices currently only supported for tables")
maxrow = self._info['nrows']
if firstrow < 0 or lastrow > maxrow:
raise ValueError(
"slice must specify a sub-range of [%d,%d]" % (0, maxrow))
dtype, offsets, isvar = self.get_rec_dtype(**keys)
w, = numpy.where(isvar == True) # noqa
if w.size > 0:
vstorage = keys.get('vstorage', self._vstorage)
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
colnums = self._extract_colnums()
array = self._read_rec_with_var(
colnums, rows, dtype, offsets, isvar, vstorage)
else:
if step != 1:
rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
array = self.read(rows=rows)
else:
# no +1 because lastrow is non-inclusive
nrows = lastrow - firstrow
array = numpy.zeros(nrows, dtype=dtype)
# only first needs to be +1. This is because the C code is
# inclusive
self._FITS.read_as_rec(self._ext+1, firstrow+1, lastrow, array)
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(
array)
for colnum, name in enumerate(array.dtype.names):
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
lower = keys.get('lower', False)
upper = keys.get('upper', False)
if self.lower or lower:
_names_to_lower_if_recarray(array)
elif self.upper or upper:
_names_to_upper_if_recarray(array)
self._maybe_trim_strings(array, **keys)
return array | [
"def",
"read_slice",
"(",
"self",
",",
"firstrow",
",",
"lastrow",
",",
"step",
"=",
"1",
",",
"*",
"*",
"keys",
")",
":",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"ASCII_TBL",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"keys",
"[",
"'rows'",
"]",
"=",
"rows",
"return",
"self",
".",
"read_ascii",
"(",
"*",
"*",
"keys",
")",
"step",
"=",
"keys",
".",
"get",
"(",
"'step'",
",",
"1",
")",
"if",
"self",
".",
"_info",
"[",
"'hdutype'",
"]",
"==",
"IMAGE_HDU",
":",
"raise",
"ValueError",
"(",
"\"slices currently only supported for tables\"",
")",
"maxrow",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"if",
"firstrow",
"<",
"0",
"or",
"lastrow",
">",
"maxrow",
":",
"raise",
"ValueError",
"(",
"\"slice must specify a sub-range of [%d,%d]\"",
"%",
"(",
"0",
",",
"maxrow",
")",
")",
"dtype",
",",
"offsets",
",",
"isvar",
"=",
"self",
".",
"get_rec_dtype",
"(",
"*",
"*",
"keys",
")",
"w",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"w",
".",
"size",
">",
"0",
":",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"array",
"=",
"self",
".",
"_read_rec_with_var",
"(",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
"else",
":",
"if",
"step",
"!=",
"1",
":",
"rows",
"=",
"numpy",
".",
"arange",
"(",
"firstrow",
",",
"lastrow",
",",
"step",
",",
"dtype",
"=",
"'i8'",
")",
"array",
"=",
"self",
".",
"read",
"(",
"rows",
"=",
"rows",
")",
"else",
":",
"# no +1 because lastrow is non-inclusive",
"nrows",
"=",
"lastrow",
"-",
"firstrow",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"# only first needs to be +1. This is becuase the c code is",
"# inclusive",
"self",
".",
"_FITS",
".",
"read_as_rec",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"firstrow",
"+",
"1",
",",
"lastrow",
",",
"array",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"for",
"colnum",
",",
"name",
"in",
"enumerate",
"(",
"array",
".",
"dtype",
".",
"names",
")",
":",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"lower",
"=",
"keys",
".",
"get",
"(",
"'lower'",
",",
"False",
")",
"upper",
"=",
"keys",
".",
"get",
"(",
"'upper'",
",",
"False",
")",
"if",
"self",
".",
"lower",
"or",
"lower",
":",
"_names_to_lower_if_recarray",
"(",
"array",
")",
"elif",
"self",
".",
"upper",
"or",
"upper",
":",
"_names_to_upper_if_recarray",
"(",
"array",
")",
"self",
".",
"_maybe_trim_strings",
"(",
"array",
",",
"*",
"*",
"keys",
")",
"return",
"array"
] | Read the specified row slice from a table.
Read all rows between firstrow and lastrow (non-inclusive, as per
python slice notation). Note you must use slice notation for
images, e.g. f[ext][20:30, 40:50]
parameters
----------
firstrow: integer
The first row to read
lastrow: integer
The last row to read, non-inclusive. This follows the python list
slice convention that one does not include the last element.
step: integer, optional
Step between rows, default 1. e.g., if step is 2, skip every other
row.
vstorage: string, optional
Over-ride the default method to store variable length columns. Can
be 'fixed' or 'object'. See docs on fitsio.FITS for details.
lower: bool, optional
If True, force all column names to lower case in output. Will
override the lower= keyword from construction.
upper: bool, optional
If True, force all column names to upper case in output. Will
override the lower= keyword from construction. | [
"Read",
"the",
"specified",
"row",
"slice",
"from",
"a",
"table",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L847-L931 | train | 234,993 |
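A minimal usage sketch for read_slice; the filename is hypothetical:

import fitsio

with fitsio.FITS('data.fits') as fits:
    # rows 200, 202, ..., 234 -- the same rows as fits[1][200:235:2]
    data = fits[1].read_slice(200, 235, step=2)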
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_rec_dtype | def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray | python | def get_rec_dtype(self, **keys):
"""
Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns
"""
colnums = keys.get('colnums', None)
vstorage = keys.get('vstorage', self._vstorage)
if colnums is None:
colnums = self._extract_colnums()
descr = []
isvararray = numpy.zeros(len(colnums), dtype=numpy.bool)
for i, colnum in enumerate(colnums):
dt, isvar = self.get_rec_column_descr(colnum, vstorage)
descr.append(dt)
isvararray[i] = isvar
dtype = numpy.dtype(descr)
offsets = numpy.zeros(len(colnums), dtype='i8')
for i, n in enumerate(dtype.names):
offsets[i] = dtype.fields[n][1]
return dtype, offsets, isvararray | [
"def",
"get_rec_dtype",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"colnums",
"=",
"keys",
".",
"get",
"(",
"'colnums'",
",",
"None",
")",
"vstorage",
"=",
"keys",
".",
"get",
"(",
"'vstorage'",
",",
"self",
".",
"_vstorage",
")",
"if",
"colnums",
"is",
"None",
":",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"descr",
"=",
"[",
"]",
"isvararray",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"numpy",
".",
"bool",
")",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"dt",
",",
"isvar",
"=",
"self",
".",
"get_rec_column_descr",
"(",
"colnum",
",",
"vstorage",
")",
"descr",
".",
"append",
"(",
"dt",
")",
"isvararray",
"[",
"i",
"]",
"=",
"isvar",
"dtype",
"=",
"numpy",
".",
"dtype",
"(",
"descr",
")",
"offsets",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"colnums",
")",
",",
"dtype",
"=",
"'i8'",
")",
"for",
"i",
",",
"n",
"in",
"enumerate",
"(",
"dtype",
".",
"names",
")",
":",
"offsets",
"[",
"i",
"]",
"=",
"dtype",
".",
"fields",
"[",
"n",
"]",
"[",
"1",
"]",
"return",
"dtype",
",",
"offsets",
",",
"isvararray"
] | Get the dtype for the specified columns
parameters
----------
colnums: integer array
The column numbers, 0 offset
vstorage: string, optional
See docs in read_columns | [
"Get",
"the",
"dtype",
"for",
"the",
"specified",
"columns"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L933-L961 | train | 234,994 |
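A minimal usage sketch for get_rec_dtype; the filename is hypothetical:

import fitsio

with fitsio.FITS('data.fits') as fits:
    dtype, offsets, isvar = fits[1].get_rec_dtype()
    # dtype describes one table row; offsets holds each field's byte
    # offset within the row; isvar flags variable-length columns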
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._check_tbit | def _check_tbit(self, **keys):
"""
Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional
"""
colnums = keys.get('colnums', None)
if colnums is None:
colnums = self._extract_colnums()
has_tbit = False
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
has_tbit = True
break
return has_tbit | python | def _check_tbit(self, **keys):
"""
Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional
"""
colnums = keys.get('colnums', None)
if colnums is None:
colnums = self._extract_colnums()
has_tbit = False
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
has_tbit = True
break
return has_tbit | [
"def",
"_check_tbit",
"(",
"self",
",",
"*",
"*",
"keys",
")",
":",
"colnums",
"=",
"keys",
".",
"get",
"(",
"'colnums'",
",",
"None",
")",
"if",
"colnums",
"is",
"None",
":",
"colnums",
"=",
"self",
".",
"_extract_colnums",
"(",
")",
"has_tbit",
"=",
"False",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"if",
"(",
"istbit",
")",
":",
"has_tbit",
"=",
"True",
"break",
"return",
"has_tbit"
] | Check if one of the columns is a TBIT column
parameters
----------
colnums: integer array, optional | [
"Check",
"if",
"one",
"of",
"the",
"columns",
"is",
"a",
"TBIT",
"column"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L963-L983 | train | 234,995 |
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._fix_tbit_dtype | def _fix_tbit_dtype(self, array, colnums):
"""
If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup
"""
descr = array.dtype.descr
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
coldescr = list(descr[i])
coldescr[1] = '?'
descr[i] = tuple(coldescr)
return array.view(descr) | python | def _fix_tbit_dtype(self, array, colnums):
"""
If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup
"""
descr = array.dtype.descr
for i, colnum in enumerate(colnums):
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
if (istbit):
coldescr = list(descr[i])
coldescr[1] = '?'
descr[i] = tuple(coldescr)
return array.view(descr) | [
"def",
"_fix_tbit_dtype",
"(",
"self",
",",
"array",
",",
"colnums",
")",
":",
"descr",
"=",
"array",
".",
"dtype",
".",
"descr",
"for",
"i",
",",
"colnum",
"in",
"enumerate",
"(",
"colnums",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"if",
"(",
"istbit",
")",
":",
"coldescr",
"=",
"list",
"(",
"descr",
"[",
"i",
"]",
")",
"coldescr",
"[",
"1",
"]",
"=",
"'?'",
"descr",
"[",
"i",
"]",
"=",
"tuple",
"(",
"coldescr",
")",
"return",
"array",
".",
"view",
"(",
"descr",
")"
] | If necessary, patch up the TBIT to convert to bool array
parameters
----------
array: record array
colnums: column numbers for lookup | [
"If",
"necessary",
"patch",
"up",
"the",
"TBIT",
"to",
"convert",
"to",
"bool",
"array"
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L985-L1002 | train | 234,996 |
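`_fix_tbit_dtype` above patches the dtype descriptor in place and re-views the array, converting a one-byte bit column to bool without copying. The same trick shown standalone with plain numpy; which field to patch is hard-coded here rather than taken from the TBIT test:

```python
import numpy as np

arr = np.zeros(3, dtype=[('id', 'i4'), ('flag', 'u1')])
arr['flag'] = [0, 1, 1]

# Rewrite the matching descriptor entry to bool; the itemsize is unchanged,
# so viewing the same buffer with the patched dtype is safe.
descr = arr.dtype.descr
for i, (name, typ) in enumerate(descr):
    if name == 'flag':          # stands in for the istbit check
        descr[i] = (name, '?')

barr = arr.view(descr)
print(barr['flag'])  # [False  True  True]
```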
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._get_simple_dtype_and_shape | def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape | python | def _get_simple_dtype_and_shape(self, colnum, rows=None):
"""
When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2)
"""
# basic datatype
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
info = self._info['colinfo'][colnum]
name = info['name']
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
shape = None
tdim = info['tdim']
shape = _tdim2shape(tdim, name, is_string=(npy_type[0] == 'S'))
if shape is not None:
if nrows > 1:
if not isinstance(shape, tuple):
# vector
shape = (nrows, shape)
else:
# multi-dimensional
shape = tuple([nrows] + list(shape))
else:
# scalar
shape = nrows
return npy_type, shape | [
"def",
"_get_simple_dtype_and_shape",
"(",
"self",
",",
"colnum",
",",
"rows",
"=",
"None",
")",
":",
"# basic datatype",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"info",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"name",
"=",
"info",
"[",
"'name'",
"]",
"if",
"rows",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"else",
":",
"nrows",
"=",
"rows",
".",
"size",
"shape",
"=",
"None",
"tdim",
"=",
"info",
"[",
"'tdim'",
"]",
"shape",
"=",
"_tdim2shape",
"(",
"tdim",
",",
"name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
")",
")",
"if",
"shape",
"is",
"not",
"None",
":",
"if",
"nrows",
">",
"1",
":",
"if",
"not",
"isinstance",
"(",
"shape",
",",
"tuple",
")",
":",
"# vector",
"shape",
"=",
"(",
"nrows",
",",
"shape",
")",
"else",
":",
"# multi-dimensional",
"shape",
"=",
"tuple",
"(",
"[",
"nrows",
"]",
"+",
"list",
"(",
"shape",
")",
")",
"else",
":",
"# scalar",
"shape",
"=",
"nrows",
"return",
"npy_type",
",",
"shape"
] | When reading a single column, we want the basic data
type and the shape of the array.
for scalar columns, shape is just nrows, otherwise
it is (nrows, dim1, dim2)
Note if rows= is sent and only a single row is requested,
the shape will be (dim1,dim2) | [
"When",
"reading",
"a",
"single",
"column",
"we",
"want",
"the",
"basic",
"data",
"type",
"and",
"the",
"shape",
"of",
"the",
"array",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1004-L1041 | train | 234,997 |
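`_get_simple_dtype_and_shape` above composes the output shape from the row count and the column's TDIM. A small sketch of just that shape logic; the dims values are illustrative:

```python
def output_shape(nrows, dims=None):
    # Mirrors the branching above: scalar columns get just the row count;
    # array columns get nrows prepended, except when only one row is read.
    if dims is None:
        return nrows
    if nrows > 1:
        if not isinstance(dims, tuple):
            return (nrows, dims)
        return tuple([nrows] + list(dims))
    return dims

print(output_shape(10))          # 10
print(output_shape(10, 3))       # (10, 3)
print(output_shape(10, (2, 4)))  # (10, 2, 4)
print(output_shape(1, (2, 4)))   # (2, 4): the single-row case noted above
```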
esheldon/fitsio | fitsio/hdu/table.py | TableHDU.get_rec_column_descr | def get_rec_column_descr(self, colnum, vstorage):
"""
Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns
"""
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
name = self._info['colinfo'][colnum]['name']
if isvar:
if vstorage == 'object':
descr = (name, 'O')
else:
tform = self._info['colinfo'][colnum]['tform']
max_size = _extract_vararray_max(tform)
if max_size <= 0:
name = self._info['colinfo'][colnum]['name']
detail = 'Will read as an object field'
if max_size < 0:
mess = "Column '%s': No maximum size: '%s'. %s"
mess = mess % (name, tform, detail)
warnings.warn(mess, FITSRuntimeWarning)
else:
mess = "Column '%s': Max size is zero: '%s'. %s"
mess = mess % (name, tform, detail)
warnings.warn(mess, FITSRuntimeWarning)
# we are forced to read this as an object array
return self.get_rec_column_descr(colnum, 'object')
if npy_type[0] == 'S':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'S%d' % max_size
descr = (name, npy_type)
elif npy_type[0] == 'U':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'U%d' % max_size
descr = (name, npy_type)
else:
descr = (name, npy_type, max_size)
else:
tdim = self._info['colinfo'][colnum]['tdim']
shape = _tdim2shape(
tdim, name,
is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
if shape is not None:
descr = (name, npy_type, shape)
else:
descr = (name, npy_type)
return descr, isvar | python | def get_rec_column_descr(self, colnum, vstorage):
"""
Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns
"""
npy_type, isvar, istbit = self._get_tbl_numpy_dtype(colnum)
name = self._info['colinfo'][colnum]['name']
if isvar:
if vstorage == 'object':
descr = (name, 'O')
else:
tform = self._info['colinfo'][colnum]['tform']
max_size = _extract_vararray_max(tform)
if max_size <= 0:
name = self._info['colinfo'][colnum]['name']
detail = 'Will read as an object field'
if max_size < 0:
mess = "Column '%s': No maximum size: '%s'. %s"
mess = mess % (name, tform, detail)
warnings.warn(mess, FITSRuntimeWarning)
else:
mess = "Column '%s': Max size is zero: '%s'. %s"
mess = mess % (name, tform, detail)
warnings.warn(mess, FITSRuntimeWarning)
# we are forced to read this as an object array
return self.get_rec_column_descr(colnum, 'object')
if npy_type[0] == 'S':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'S%d' % max_size
descr = (name, npy_type)
elif npy_type[0] == 'U':
# variable length string columns cannot
# themselves be arrays I don't think
npy_type = 'U%d' % max_size
descr = (name, npy_type)
else:
descr = (name, npy_type, max_size)
else:
tdim = self._info['colinfo'][colnum]['tdim']
shape = _tdim2shape(
tdim, name,
is_string=(npy_type[0] == 'S' or npy_type[0] == 'U'))
if shape is not None:
descr = (name, npy_type, shape)
else:
descr = (name, npy_type)
return descr, isvar | [
"def",
"get_rec_column_descr",
"(",
"self",
",",
"colnum",
",",
"vstorage",
")",
":",
"npy_type",
",",
"isvar",
",",
"istbit",
"=",
"self",
".",
"_get_tbl_numpy_dtype",
"(",
"colnum",
")",
"name",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'name'",
"]",
"if",
"isvar",
":",
"if",
"vstorage",
"==",
"'object'",
":",
"descr",
"=",
"(",
"name",
",",
"'O'",
")",
"else",
":",
"tform",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tform'",
"]",
"max_size",
"=",
"_extract_vararray_max",
"(",
"tform",
")",
"if",
"max_size",
"<=",
"0",
":",
"name",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'name'",
"]",
"mess",
"=",
"'Will read as an object field'",
"if",
"max_size",
"<",
"0",
":",
"mess",
"=",
"\"Column '%s': No maximum size: '%s'. %s\"",
"mess",
"=",
"mess",
"%",
"(",
"name",
",",
"tform",
",",
"mess",
")",
"warnings",
".",
"warn",
"(",
"mess",
",",
"FITSRuntimeWarning",
")",
"else",
":",
"mess",
"=",
"\"Column '%s': Max size is zero: '%s'. %s\"",
"mess",
"=",
"mess",
"%",
"(",
"name",
",",
"tform",
",",
"mess",
")",
"warnings",
".",
"warn",
"(",
"mess",
",",
"FITSRuntimeWarning",
")",
"# we are forced to read this as an object array",
"return",
"self",
".",
"get_rec_column_descr",
"(",
"colnum",
",",
"'object'",
")",
"if",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
":",
"# variable length string columns cannot",
"# themselves be arrays I don't think",
"npy_type",
"=",
"'S%d'",
"%",
"max_size",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"elif",
"npy_type",
"[",
"0",
"]",
"==",
"'U'",
":",
"# variable length string columns cannot",
"# themselves be arrays I don't think",
"npy_type",
"=",
"'U%d'",
"%",
"max_size",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"else",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
",",
"max_size",
")",
"else",
":",
"tdim",
"=",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tdim'",
"]",
"shape",
"=",
"_tdim2shape",
"(",
"tdim",
",",
"name",
",",
"is_string",
"=",
"(",
"npy_type",
"[",
"0",
"]",
"==",
"'S'",
"or",
"npy_type",
"[",
"0",
"]",
"==",
"'U'",
")",
")",
"if",
"shape",
"is",
"not",
"None",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
",",
"shape",
")",
"else",
":",
"descr",
"=",
"(",
"name",
",",
"npy_type",
")",
"return",
"descr",
",",
"isvar"
] | Get a descriptor entry for the specified column.
parameters
----------
colnum: integer
The column number, 0 offset
vstorage: string
See docs in read_columns | [
"Get",
"a",
"descriptor",
"entry",
"for",
"the",
"specified",
"column",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1043-L1100 | train | 234,998 |
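`get_rec_column_descr` above emits one of three descriptor shapes: `(name, 'O')` for object storage, `(name, 'S<n>'/'U<n>')` or `(name, type, max_size)` for padded variable-length columns, and `(name, type[, shape])` for fixed columns. All three are ordinary numpy descr entries; a sketch with invented names and a max_size of 5:

```python
import numpy as np

dt_object = np.dtype([('vcol', 'O')])       # vstorage == 'object'
dt_padded = np.dtype([('vcol', 'f8', 5)])   # variable column padded to max_size
dt_fixed = np.dtype([('x', 'f4', (2, 3))])  # fixed column with a TDIM shape

arr = np.zeros(2, dtype=dt_padded)
arr['vcol'][0, :3] = [1.0, 2.0, 3.0]        # shorter rows stay zero-padded
print(dt_object, dt_fixed)
print(arr['vcol'][0])                       # [1. 2. 3. 0. 0.]
```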
esheldon/fitsio | fitsio/hdu/table.py | TableHDU._read_rec_with_var | def _read_rec_with_var(
self, colnums, rows, dtype, offsets, isvar, vstorage):
"""
Read columns from a table into a rec array, including variable length
columns. This is special because, for efficiency, it involves reading
from the main table as normal but skipping the columns in the array
that are variable. Then reading the variable length columns, with
accounting for strides appropriately.
row and column numbers should be checked before calling this function
"""
colnumsp = colnums+1
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
# read from the main table first
wnotvar, = numpy.where(isvar == False) # noqa
if wnotvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wnotvar]
theseoff = offsets[wnotvar]
self._FITS.read_columns_as_rec_byoffset(self._ext+1,
thesecol,
theseoff,
array,
rows)
for i in xrange(thesecol.size):
name = array.dtype.names[wnotvar[i]]
colnum = thesecol[i]-1
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
# now read the variable length arrays. We may be able to speed this up
# by storing directly instead of reading first into a list
wvar, = numpy.where(isvar == True) # noqa
if wvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wvar]
for i in xrange(thesecol.size):
colnump = thesecol[i]
name = array.dtype.names[wvar[i]]
dlist = self._FITS.read_var_column_as_list(
self._ext+1, colnump, rows)
if (isinstance(dlist[0], str) or
(IS_PY3 and isinstance(dlist[0], bytes))):
is_string = True
else:
is_string = False
if array[name].dtype.descr[0][1][1] == 'O':
# storing in object array
# get references to each, no copy made
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
array[name][irow] = item
else:
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
if is_string:
array[name][irow] = item
else:
ncopy = len(item)
if IS_PY3:
ts = array[name].dtype.descr[0][1][1]
if ts != 'S' and ts != 'U':
array[name][irow][0:ncopy] = item[:]
else:
array[name][irow] = item
else:
array[name][irow][0:ncopy] = item[:]
return array | python | def _read_rec_with_var(
self, colnums, rows, dtype, offsets, isvar, vstorage):
"""
Read columns from a table into a rec array, including variable length
columns. This is special because, for efficiency, it involves reading
from the main table as normal but skipping the columns in the array
that are variable. Then reading the variable length columns, with
accounting for strides appropriately.
row and column numbers should be checked before calling this function
"""
colnumsp = colnums+1
if rows is None:
nrows = self._info['nrows']
else:
nrows = rows.size
array = numpy.zeros(nrows, dtype=dtype)
# read from the main table first
wnotvar, = numpy.where(isvar == False) # noqa
if wnotvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wnotvar]
theseoff = offsets[wnotvar]
self._FITS.read_columns_as_rec_byoffset(self._ext+1,
thesecol,
theseoff,
array,
rows)
for i in xrange(thesecol.size):
name = array.dtype.names[wnotvar[i]]
colnum = thesecol[i]-1
self._rescale_and_convert_field_inplace(
array,
name,
self._info['colinfo'][colnum]['tscale'],
self._info['colinfo'][colnum]['tzero'])
array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
# now read the variable length arrays. We may be able to speed this up
# by storing directly instead of reading first into a list
wvar, = numpy.where(isvar == True) # noqa
if wvar.size > 0:
# this will be contiguous (not true for slices)
thesecol = colnumsp[wvar]
for i in xrange(thesecol.size):
colnump = thesecol[i]
name = array.dtype.names[wvar[i]]
dlist = self._FITS.read_var_column_as_list(
self._ext+1, colnump, rows)
if (isinstance(dlist[0], str) or
(IS_PY3 and isinstance(dlist[0], bytes))):
is_string = True
else:
is_string = False
if array[name].dtype.descr[0][1][1] == 'O':
# storing in object array
# get references to each, no copy made
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
array[name][irow] = item
else:
for irow, item in enumerate(dlist):
if IS_PY3 and isinstance(item, bytes):
item = item.decode('ascii')
if is_string:
array[name][irow] = item
else:
ncopy = len(item)
if IS_PY3:
ts = array[name].dtype.descr[0][1][1]
if ts != 'S' and ts != 'U':
array[name][irow][0:ncopy] = item[:]
else:
array[name][irow] = item
else:
array[name][irow][0:ncopy] = item[:]
return array | [
"def",
"_read_rec_with_var",
"(",
"self",
",",
"colnums",
",",
"rows",
",",
"dtype",
",",
"offsets",
",",
"isvar",
",",
"vstorage",
")",
":",
"colnumsp",
"=",
"colnums",
"+",
"1",
"if",
"rows",
"is",
"None",
":",
"nrows",
"=",
"self",
".",
"_info",
"[",
"'nrows'",
"]",
"else",
":",
"nrows",
"=",
"rows",
".",
"size",
"array",
"=",
"numpy",
".",
"zeros",
"(",
"nrows",
",",
"dtype",
"=",
"dtype",
")",
"# read from the main table first",
"wnotvar",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"False",
")",
"# noqa",
"if",
"wnotvar",
".",
"size",
">",
"0",
":",
"# this will be contiguous (not true for slices)",
"thesecol",
"=",
"colnumsp",
"[",
"wnotvar",
"]",
"theseoff",
"=",
"offsets",
"[",
"wnotvar",
"]",
"self",
".",
"_FITS",
".",
"read_columns_as_rec_byoffset",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"thesecol",
",",
"theseoff",
",",
"array",
",",
"rows",
")",
"for",
"i",
"in",
"xrange",
"(",
"thesecol",
".",
"size",
")",
":",
"name",
"=",
"array",
".",
"dtype",
".",
"names",
"[",
"wnotvar",
"[",
"i",
"]",
"]",
"colnum",
"=",
"thesecol",
"[",
"i",
"]",
"-",
"1",
"self",
".",
"_rescale_and_convert_field_inplace",
"(",
"array",
",",
"name",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tscale'",
"]",
",",
"self",
".",
"_info",
"[",
"'colinfo'",
"]",
"[",
"colnum",
"]",
"[",
"'tzero'",
"]",
")",
"array",
"=",
"self",
".",
"_maybe_decode_fits_ascii_strings_to_unicode_py3",
"(",
"array",
")",
"# now read the variable length arrays we may be able to speed this up",
"# by storing directly instead of reading first into a list",
"wvar",
",",
"=",
"numpy",
".",
"where",
"(",
"isvar",
"==",
"True",
")",
"# noqa",
"if",
"wvar",
".",
"size",
">",
"0",
":",
"# this will be contiguous (not true for slices)",
"thesecol",
"=",
"colnumsp",
"[",
"wvar",
"]",
"for",
"i",
"in",
"xrange",
"(",
"thesecol",
".",
"size",
")",
":",
"colnump",
"=",
"thesecol",
"[",
"i",
"]",
"name",
"=",
"array",
".",
"dtype",
".",
"names",
"[",
"wvar",
"[",
"i",
"]",
"]",
"dlist",
"=",
"self",
".",
"_FITS",
".",
"read_var_column_as_list",
"(",
"self",
".",
"_ext",
"+",
"1",
",",
"colnump",
",",
"rows",
")",
"if",
"(",
"isinstance",
"(",
"dlist",
"[",
"0",
"]",
",",
"str",
")",
"or",
"(",
"IS_PY3",
"and",
"isinstance",
"(",
"dlist",
"[",
"0",
"]",
",",
"bytes",
")",
")",
")",
":",
"is_string",
"=",
"True",
"else",
":",
"is_string",
"=",
"False",
"if",
"array",
"[",
"name",
"]",
".",
"dtype",
".",
"descr",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"1",
"]",
"==",
"'O'",
":",
"# storing in object array",
"# get references to each, no copy made",
"for",
"irow",
",",
"item",
"in",
"enumerate",
"(",
"dlist",
")",
":",
"if",
"IS_PY3",
"and",
"isinstance",
"(",
"item",
",",
"bytes",
")",
":",
"item",
"=",
"item",
".",
"decode",
"(",
"'ascii'",
")",
"array",
"[",
"name",
"]",
"[",
"irow",
"]",
"=",
"item",
"else",
":",
"for",
"irow",
",",
"item",
"in",
"enumerate",
"(",
"dlist",
")",
":",
"if",
"IS_PY3",
"and",
"isinstance",
"(",
"item",
",",
"bytes",
")",
":",
"item",
"=",
"item",
".",
"decode",
"(",
"'ascii'",
")",
"if",
"is_string",
":",
"array",
"[",
"name",
"]",
"[",
"irow",
"]",
"=",
"item",
"else",
":",
"ncopy",
"=",
"len",
"(",
"item",
")",
"if",
"IS_PY3",
":",
"ts",
"=",
"array",
"[",
"name",
"]",
".",
"dtype",
".",
"descr",
"[",
"0",
"]",
"[",
"1",
"]",
"[",
"1",
"]",
"if",
"ts",
"!=",
"'S'",
"and",
"ts",
"!=",
"'U'",
":",
"array",
"[",
"name",
"]",
"[",
"irow",
"]",
"[",
"0",
":",
"ncopy",
"]",
"=",
"item",
"[",
":",
"]",
"else",
":",
"array",
"[",
"name",
"]",
"[",
"irow",
"]",
"=",
"item",
"else",
":",
"array",
"[",
"name",
"]",
"[",
"irow",
"]",
"[",
"0",
":",
"ncopy",
"]",
"=",
"item",
"[",
":",
"]",
"return",
"array"
] | Read columns from a table into a rec array, including variable length
columns. This is special because, for efficiency, it involves reading
from the main table as normal but skipping the columns in the array
that are variable. Then reading the variable length columns, with
accounting for strides appropriately.
row and column numbers should be checked before calling this function | [
"Read",
"columns",
"from",
"a",
"table",
"into",
"a",
"rec",
"array",
"including",
"variable",
"length",
"columns",
".",
"This",
"is",
"special",
"because",
"for",
"efficiency",
"it",
"involves",
"reading",
"from",
"the",
"main",
"table",
"as",
"normal",
"but",
"skipping",
"the",
"columns",
"in",
"the",
"array",
"that",
"are",
"variable",
".",
"Then",
"reading",
"the",
"variable",
"length",
"columns",
"with",
"accounting",
"for",
"strides",
"appropriately",
"."
] | a6f07919f457a282fe240adad9d2c30906b71a15 | https://github.com/esheldon/fitsio/blob/a6f07919f457a282fe240adad9d2c30906b71a15/fitsio/hdu/table.py#L1102-L1188 | train | 234,999 |
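`_read_rec_with_var` above fills the output in two passes: fixed-width columns in one bulk read, then each variable-length row copied into its padded field. The copy pattern sketched standalone with invented data; real reads come from the `_FITS` calls in the record:

```python
import numpy as np

# Pass-2 stand-in: per-row variable-length data, as read_var_column_as_list
# would return it, copied into a field padded to the longest entry.
dlist = [np.array([1.0]), np.array([1.0, 2.0, 3.0])]
array = np.zeros(2, dtype=[('id', 'i4'), ('var', 'f8', 3)])

array['id'] = [10, 11]                # pass 1: fixed-width columns, in bulk
for irow, item in enumerate(dlist):   # pass 2: one row at a time
    ncopy = len(item)
    array['var'][irow][0:ncopy] = item[:]

print(array['var'])  # [[1. 0. 0.] [1. 2. 3.]]
```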