idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
46,000 | def warning ( self , amplexception ) : msg = '\t' + str ( amplexception ) . replace ( '\n' , '\n\t' ) print ( 'Warning:\n{:s}' . format ( msg ) ) | Receives notification of a warning . |
46,001 | def register_magics ( store_name = '_ampl_cells' , ampl_object = None ) : from IPython . core . magic import ( Magics , magics_class , cell_magic , line_magic ) @ magics_class class StoreAMPL ( Magics ) : def __init__ ( self , shell = None , ** kwargs ) : Magics . __init__ ( self , shell = shell , ** kwargs ) self . _store = [ ] shell . user_ns [ store_name ] = self . _store @ cell_magic def ampl ( self , line , cell ) : self . _store . append ( cell ) @ cell_magic def ampl_eval ( self , line , cell ) : ampl_object . eval ( cell ) @ line_magic def get_ampl ( self , line ) : return self . _store get_ipython ( ) . register_magics ( StoreAMPL ) | Register jupyter notebook magics %%ampl and %%ampl_eval . |
46,002 | def fix ( self , value = None ) : if value is None : self . _impl . fix ( ) else : self . _impl . fix ( value ) | Fix all instances of this variable to a value if provided or to their current value otherwise . |
46,003 | def toVName ( name , stripNum = 0 , upper = False ) : if upper : name = name . upper ( ) if stripNum != 0 : name = name [ : - stripNum ] return name . replace ( '_' , '-' ) | Turn a Python name into an iCalendar style name optionally uppercase and with characters stripped off . |
46,004 | def readComponents ( streamOrString , validate = False , transform = True , ignoreUnreadable = False , allowQP = False ) : if isinstance ( streamOrString , basestring ) : stream = six . StringIO ( streamOrString ) else : stream = streamOrString try : stack = Stack ( ) versionLine = None n = 0 for line , n in getLogicalLines ( stream , allowQP ) : if ignoreUnreadable : try : vline = textLineToContentLine ( line , n ) except VObjectError as e : if e . lineNumber is not None : msg = "Skipped line {lineNumber}, message: {msg}" else : msg = "Skipped a line, message: {msg}" logger . error ( msg . format ( ** { 'lineNumber' : e . lineNumber , 'msg' : str ( e ) } ) ) continue else : vline = textLineToContentLine ( line , n ) if vline . name == "VERSION" : versionLine = vline stack . modifyTop ( vline ) elif vline . name == "BEGIN" : stack . push ( Component ( vline . value , group = vline . group ) ) elif vline . name == "PROFILE" : if not stack . top ( ) : stack . push ( Component ( ) ) stack . top ( ) . setProfile ( vline . value ) elif vline . name == "END" : if len ( stack ) == 0 : err = "Attempted to end the {0} component but it was never opened" raise ParseError ( err . format ( vline . value ) , n ) if vline . value . upper ( ) == stack . topName ( ) : if len ( stack ) == 1 : component = stack . pop ( ) if versionLine is not None : component . setBehaviorFromVersionLine ( versionLine ) else : behavior = getBehavior ( component . name ) if behavior : component . setBehavior ( behavior ) if validate : component . validate ( raiseException = True ) if transform : component . transformChildrenToNative ( ) yield component else : stack . modifyTop ( stack . pop ( ) ) else : err = "{0} component wasn't closed" raise ParseError ( err . format ( stack . topName ( ) ) , n ) else : stack . modifyTop ( vline ) if stack . top ( ) : if stack . topName ( ) is None : logger . warning ( "Top level component was never named" ) elif stack . top ( ) . useBegin : raise ParseError ( "Component {0!s} was never closed" . format ( ( stack . topName ( ) ) ) , n ) yield stack . pop ( ) except ParseError as e : e . input = streamOrString raise | Generate one Component at a time from a stream . |
46,005 | def readOne ( stream , validate = False , transform = True , ignoreUnreadable = False , allowQP = False ) : return next ( readComponents ( stream , validate , transform , ignoreUnreadable , allowQP ) ) | Return the first component from stream . |
46,006 | def registerBehavior ( behavior , name = None , default = False , id = None ) : if not name : name = behavior . name . upper ( ) if id is None : id = behavior . versionString if name in __behaviorRegistry : if default : __behaviorRegistry [ name ] . insert ( 0 , ( id , behavior ) ) else : __behaviorRegistry [ name ] . append ( ( id , behavior ) ) else : __behaviorRegistry [ name ] = [ ( id , behavior ) ] | Register the given behavior . |
46,007 | def getBehavior ( name , id = None ) : name = name . upper ( ) if name in __behaviorRegistry : if id : for n , behavior in __behaviorRegistry [ name ] : if n == id : return behavior return __behaviorRegistry [ name ] [ 0 ] [ 1 ] return None | Return a matching behavior if it exists or None . |
46,008 | def validate ( self , * args , ** kwds ) : if self . behavior : return self . behavior . validate ( self , * args , ** kwds ) return True | Call the behavior s validate method or return True . |
46,009 | def autoBehavior ( self , cascade = False ) : parentBehavior = self . parentBehavior if parentBehavior is not None : knownChildTup = parentBehavior . knownChildren . get ( self . name , None ) if knownChildTup is not None : behavior = getBehavior ( self . name , knownChildTup [ 2 ] ) if behavior is not None : self . setBehavior ( behavior , cascade ) if isinstance ( self , ContentLine ) and self . encoded : self . behavior . decode ( self ) elif isinstance ( self , ContentLine ) : self . behavior = parentBehavior . defaultBehavior if self . encoded and self . behavior : self . behavior . decode ( self ) | Set behavior if name is in self . parentBehavior . knownChildren . |
46,010 | def setBehavior ( self , behavior , cascade = True ) : self . behavior = behavior if cascade : for obj in self . getChildren ( ) : obj . parentBehavior = behavior obj . autoBehavior ( True ) | Set behavior . If cascade is True autoBehavior all descendants . |
46,011 | def serialize ( self , buf = None , lineLength = 75 , validate = True , behavior = None ) : if not behavior : behavior = self . behavior if behavior : if DEBUG : logger . debug ( "serializing {0!s} with behavior {1!s}" . format ( self . name , behavior ) ) return behavior . serialize ( self , buf , lineLength , validate ) else : if DEBUG : logger . debug ( "serializing {0!s} without behavior" . format ( self . name ) ) return defaultSerialize ( self , buf , lineLength ) | Serialize to buf if it exists otherwise return a string . |
46,012 | def valueRepr ( self ) : v = self . value if self . behavior : v = self . behavior . valueRepr ( self ) return v | Transform the representation of the value according to the behavior if any . |
46,013 | def setProfile ( self , name ) : if self . name or self . useBegin : if self . name == name : return raise VObjectError ( "This component already has a PROFILE or " "uses BEGIN." ) self . name = name . upper ( ) | Assign a PROFILE to this unnamed component . |
46,014 | def add ( self , objOrName , group = None ) : if isinstance ( objOrName , VBase ) : obj = objOrName if self . behavior : obj . parentBehavior = self . behavior obj . autoBehavior ( True ) else : name = objOrName . upper ( ) try : id = self . behavior . knownChildren [ name ] [ 2 ] behavior = getBehavior ( name , id ) if behavior . isComponent : obj = Component ( name ) else : obj = ContentLine ( name , [ ] , '' , group ) obj . parentBehavior = self . behavior obj . behavior = behavior obj = obj . transformToNative ( ) except ( KeyError , AttributeError ) : obj = ContentLine ( objOrName , [ ] , '' , group ) if obj . behavior is None and self . behavior is not None : if isinstance ( obj , ContentLine ) : obj . behavior = self . behavior . defaultBehavior self . contents . setdefault ( obj . name . lower ( ) , [ ] ) . append ( obj ) return obj | Add objOrName to contents set behavior if it can be inferred . |
46,015 | def remove ( self , obj ) : named = self . contents . get ( obj . name . lower ( ) ) if named : try : named . remove ( obj ) if len ( named ) == 0 : del self . contents [ obj . name . lower ( ) ] except ValueError : pass | Remove obj from contents . |
46,016 | def setBehaviorFromVersionLine ( self , versionLine ) : v = getBehavior ( self . name , versionLine . value ) if v : self . setBehavior ( v ) | Set behavior if one matches name versionLine . value . |
46,017 | def transformChildrenToNative ( self ) : for childArray in ( self . contents [ k ] for k in self . sortChildKeys ( ) ) : for child in childArray : child = child . transformToNative ( ) child . transformChildrenToNative ( ) | Recursively replace children with their native representation . |
46,018 | def transformChildrenFromNative ( self , clearBehavior = True ) : for childArray in self . contents . values ( ) : for child in childArray : child = child . transformFromNative ( ) child . transformChildrenFromNative ( clearBehavior ) if clearBehavior : child . behavior = None child . parentBehavior = None | Recursively transform native children to vanilla representations . |
46,019 | def change_tz ( cal , new_timezone , default , utc_only = False , utc_tz = icalendar . utc ) : for vevent in getattr ( cal , 'vevent_list' , [ ] ) : start = getattr ( vevent , 'dtstart' , None ) end = getattr ( vevent , 'dtend' , None ) for node in ( start , end ) : if node : dt = node . value if ( isinstance ( dt , datetime ) and ( not utc_only or dt . tzinfo == utc_tz ) ) : if dt . tzinfo is None : dt = dt . replace ( tzinfo = default ) node . value = dt . astimezone ( new_timezone ) | Change the timezone of the specified component . |
46,020 | def defaultSerialize ( obj , buf , lineLength ) : outbuf = buf or six . StringIO ( ) if isinstance ( obj , Component ) : if obj . group is None : groupString = '' else : groupString = obj . group + '.' if obj . useBegin : foldOneLine ( outbuf , "{0}BEGIN:{1}" . format ( groupString , obj . name ) , lineLength ) for child in obj . getSortedChildren ( ) : child . serialize ( outbuf , lineLength , validate = False ) if obj . useBegin : foldOneLine ( outbuf , "{0}END:{1}" . format ( groupString , obj . name ) , lineLength ) elif isinstance ( obj , ContentLine ) : startedEncoded = obj . encoded if obj . behavior and not startedEncoded : obj . behavior . encode ( obj ) s = six . StringIO ( ) if obj . group is not None : s . write ( obj . group + '.' ) s . write ( obj . name . upper ( ) ) keys = sorted ( obj . params . keys ( ) ) for key in keys : paramstr = ',' . join ( dquoteEscape ( p ) for p in obj . params [ key ] ) s . write ( ";{0}={1}" . format ( key , paramstr ) ) s . write ( ":{0}" . format ( str_ ( obj . value ) ) ) if obj . behavior and not startedEncoded : obj . behavior . decode ( obj ) foldOneLine ( outbuf , s . getvalue ( ) , lineLength ) return buf or outbuf . getvalue ( ) | Encode and fold obj and its children write to buf or return a string . |
46,021 | def toUnicode ( s ) : if isinstance ( s , six . binary_type ) : s = s . decode ( 'utf-8' ) return s | Take a string or unicode turn it into unicode decoding as utf - 8 |
46,022 | def numToDigits ( num , places ) : s = str ( num ) if len ( s ) < places : return ( "0" * ( places - len ( s ) ) ) + s elif len ( s ) > places : return s [ len ( s ) - places : ] else : return s | Helper for converting numbers to textual digits . |
46,023 | def timedeltaToString ( delta ) : if delta . days == 0 : sign = 1 else : sign = delta . days / abs ( delta . days ) delta = abs ( delta ) days = delta . days hours = int ( delta . seconds / 3600 ) minutes = int ( ( delta . seconds % 3600 ) / 60 ) seconds = int ( delta . seconds % 60 ) output = '' if sign == - 1 : output += '-' output += 'P' if days : output += '{}D' . format ( days ) if hours or minutes or seconds : output += 'T' elif not days : output += 'T0S' if hours : output += '{}H' . format ( hours ) if minutes : output += '{}M' . format ( minutes ) if seconds : output += '{}S' . format ( seconds ) return output | Convert timedelta to an ical DURATION . |
46,024 | def stringToTextValues ( s , listSeparator = ',' , charList = None , strict = False ) : if charList is None : charList = escapableCharList def escapableChar ( c ) : return c in charList def error ( msg ) : if strict : raise ParseError ( msg ) else : logging . error ( msg ) charIterator = enumerate ( s ) state = "read normal" current = [ ] results = [ ] while True : try : charIndex , char = next ( charIterator ) except : char = "eof" if state == "read normal" : if char == '\\' : state = "read escaped char" elif char == listSeparator : state = "read normal" current = "" . join ( current ) results . append ( current ) current = [ ] elif char == "eof" : state = "end" else : state = "read normal" current . append ( char ) elif state == "read escaped char" : if escapableChar ( char ) : state = "read normal" if char in 'nN' : current . append ( '\n' ) else : current . append ( char ) else : state = "read normal" current . append ( '\\' + char ) elif state == "end" : if len ( current ) or len ( results ) == 0 : current = "" . join ( current ) results . append ( current ) return results elif state == "error" : return results else : state = "error" error ( "unknown state: '{0!s}' reached in {1!s}" . format ( state , s ) ) | Returns list of strings . |
46,025 | def parseDtstart ( contentline , allowSignatureMismatch = False ) : tzinfo = getTzid ( getattr ( contentline , 'tzid_param' , None ) ) valueParam = getattr ( contentline , 'value_param' , 'DATE-TIME' ) . upper ( ) if valueParam == "DATE" : return stringToDate ( contentline . value ) elif valueParam == "DATE-TIME" : try : return stringToDateTime ( contentline . value , tzinfo ) except : if allowSignatureMismatch : return stringToDate ( contentline . value ) else : raise | Convert a contentline s value into a date or date - time . |
46,026 | def tzinfo_eq ( tzinfo1 , tzinfo2 , startYear = 2000 , endYear = 2020 ) : if tzinfo1 == tzinfo2 : return True elif tzinfo1 is None or tzinfo2 is None : return False def dt_test ( dt ) : if dt is None : return True return tzinfo1 . utcoffset ( dt ) == tzinfo2 . utcoffset ( dt ) if not dt_test ( datetime . datetime ( startYear , 1 , 1 ) ) : return False for year in range ( startYear , endYear ) : for transitionTo in 'daylight' , 'standard' : t1 = getTransition ( transitionTo , year , tzinfo1 ) t2 = getTransition ( transitionTo , year , tzinfo2 ) if t1 != t2 or not dt_test ( t1 ) : return False return True | Compare offsets and DST transitions from startYear to endYear . |
46,027 | def registerTzinfo ( obj , tzinfo ) : tzid = obj . pickTzid ( tzinfo ) if tzid and not getTzid ( tzid , False ) : registerTzid ( tzid , tzinfo ) return tzid | Register tzinfo if it s not already registered return its tzid . |
46,028 | def pickTzid ( tzinfo , allowUTC = False ) : if tzinfo is None or ( not allowUTC and tzinfo_eq ( tzinfo , utc ) ) : return None if hasattr ( tzinfo , 'tzid' ) : return toUnicode ( tzinfo . tzid ) if hasattr ( tzinfo , 'zone' ) : return toUnicode ( tzinfo . zone ) elif hasattr ( tzinfo , '_tzid' ) : return toUnicode ( tzinfo . _tzid ) else : notDST = datetime . timedelta ( 0 ) for month in range ( 1 , 13 ) : dt = datetime . datetime ( 2000 , month , 1 ) if tzinfo . dst ( dt ) == notDST : return toUnicode ( tzinfo . tzname ( dt ) ) raise VObjectError ( "Unable to guess TZID for tzinfo {0!s}" . format ( tzinfo ) ) | Given a tzinfo class use known APIs to determine TZID or use tzname . |
46,029 | def transformToNative ( obj ) : if not obj . isNative : object . __setattr__ ( obj , '__class__' , RecurringComponent ) obj . isNative = True return obj | Turn a recurring Component into a RecurringComponent . |
46,030 | def generateImplicitParameters ( obj ) : if not hasattr ( obj , 'uid' ) : rand = int ( random . random ( ) * 100000 ) now = datetime . datetime . now ( utc ) now = dateTimeToString ( now ) host = socket . gethostname ( ) obj . add ( ContentLine ( 'UID' , [ ] , "{0} - {1}@{2}" . format ( now , rand , host ) ) ) | Generate a UID if one does not exist . |
46,031 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True if obj . value == '' : return obj obj . value = obj . value obj . value = parseDtstart ( obj , allowSignatureMismatch = True ) if getattr ( obj , 'value_param' , 'DATE-TIME' ) . upper ( ) == 'DATE-TIME' : if hasattr ( obj , 'tzid_param' ) : obj . params [ 'X-VOBJ-ORIGINAL-TZID' ] = [ obj . tzid_param ] del obj . tzid_param return obj | Turn obj . value into a date or datetime . |
46,032 | def transformFromNative ( obj ) : if type ( obj . value ) == datetime . date : obj . isNative = False obj . value_param = 'DATE' obj . value = dateToString ( obj . value ) return obj else : return DateTimeBehavior . transformFromNative ( obj ) | Replace the date or datetime in obj . value with an ISO 8601 string . |
46,033 | def transformFromNative ( obj ) : if obj . value and type ( obj . value [ 0 ] ) == datetime . date : obj . isNative = False obj . value_param = 'DATE' obj . value = ',' . join ( [ dateToString ( val ) for val in obj . value ] ) return obj else : if obj . isNative : obj . isNative = False transformed = [ ] tzid = None for val in obj . value : if tzid is None and type ( val ) == datetime . datetime : tzid = TimezoneComponent . registerTzinfo ( val . tzinfo ) if tzid is not None : obj . tzid_param = tzid transformed . append ( dateTimeToString ( val ) ) obj . value = ',' . join ( transformed ) return obj | Replace the date datetime or period tuples in obj . value with appropriate strings . |
46,034 | def decode ( cls , line ) : if line . encoded : line . value = stringToTextValues ( line . value , listSeparator = cls . listSeparator ) line . encoded = False | Remove backslash escaping from line . value then split on commas . |
46,035 | def generateImplicitParameters ( obj ) : try : obj . action except AttributeError : obj . add ( 'action' ) . value = 'AUDIO' try : obj . trigger except AttributeError : obj . add ( 'trigger' ) . value = datetime . timedelta ( 0 ) | Create default ACTION and TRIGGER if they re not set . |
46,036 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = obj . value if obj . value == '' : return obj else : deltalist = stringToDurations ( obj . value ) if len ( deltalist ) == 1 : obj . value = deltalist [ 0 ] return obj else : raise ParseError ( "DURATION must have a single duration string." ) | Turn obj . value into a datetime . timedelta . |
46,037 | def transformFromNative ( obj ) : if not obj . isNative : return obj obj . isNative = False obj . value = timedeltaToString ( obj . value ) return obj | Replace the datetime . timedelta in obj . value with an RFC2445 string . |
46,038 | def transformToNative ( obj ) : if obj . isNative : return obj value = getattr ( obj , 'value_param' , 'DURATION' ) . upper ( ) if hasattr ( obj , 'value_param' ) : del obj . value_param if obj . value == '' : obj . isNative = True return obj elif value == 'DURATION' : try : return Duration . transformToNative ( obj ) except ParseError : logger . warning ( "TRIGGER not recognized as DURATION, trying " "DATE-TIME, because iCal sometimes exports " "DATE-TIMEs without setting VALUE=DATE-TIME" ) try : obj . isNative = False dt = DateTimeBehavior . transformToNative ( obj ) return dt except : msg = "TRIGGER with no VALUE not recognized as DURATION " "or as DATE-TIME" raise ParseError ( msg ) elif value == 'DATE-TIME' : return DateTimeBehavior . transformToNative ( obj ) else : raise ParseError ( "VALUE must be DURATION or DATE-TIME" ) | Turn obj . value into a timedelta or datetime . |
46,039 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True if obj . value == '' : obj . value = [ ] return obj tzinfo = getTzid ( getattr ( obj , 'tzid_param' , None ) ) obj . value = [ stringToPeriod ( x , tzinfo ) for x in obj . value . split ( "," ) ] return obj | Convert comma separated periods into tuples . |
46,040 | def transformFromNative ( cls , obj ) : if obj . isNative : obj . isNative = False transformed = [ ] for tup in obj . value : transformed . append ( periodToString ( tup , cls . forceUTC ) ) if len ( transformed ) > 0 : tzid = TimezoneComponent . registerTzinfo ( tup [ 0 ] . tzinfo ) if not cls . forceUTC and tzid is not None : obj . tzid_param = tzid obj . value = ',' . join ( transformed ) return obj | Convert the list of tuples in obj . value to strings . |
46,041 | def serializeFields ( obj , order = None ) : fields = [ ] if order is None : fields = [ backslashEscape ( val ) for val in obj ] else : for field in order : escapedValueList = [ backslashEscape ( val ) for val in toList ( getattr ( obj , field ) ) ] fields . append ( ',' . join ( escapedValueList ) ) return ';' . join ( fields ) | Turn an object s fields into a ; and seperated string . |
46,042 | def toString ( val , join_char = '\n' ) : if type ( val ) in ( list , tuple ) : return join_char . join ( val ) return val | Turn a string or array value into a string . |
46,043 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = Name ( ** dict ( zip ( NAME_ORDER , splitFields ( obj . value ) ) ) ) return obj | Turn obj . value into a Name . |
46,044 | def transformFromNative ( obj ) : obj . isNative = False obj . value = serializeFields ( obj . value , NAME_ORDER ) return obj | Replace the Name in obj . value with a string . |
46,045 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = Address ( ** dict ( zip ( ADDRESS_ORDER , splitFields ( obj . value ) ) ) ) return obj | Turn obj . value into an Address . |
46,046 | def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = splitFields ( obj . value ) return obj | Turn obj . value into a list . |
46,047 | def decode ( cls , line ) : if line . encoded : if 'BASE64' in line . singletonparams : line . singletonparams . remove ( 'BASE64' ) line . encoding_param = cls . base64string encoding = getattr ( line , 'encoding_param' , None ) if encoding : line . value = codecs . decode ( line . value . encode ( "utf-8" ) , "base64" ) else : line . value = stringToTextValues ( line . value ) [ 0 ] line . encoded = False | Remove backslash escaping from line . valueDecode line either to remove backslash espacing or to decode base64 encoding . The content line should contain a ENCODING = b for base64 encoding but Apple Addressbook seems to export a singleton parameter of BASE64 which does not match the 3 . 0 vCard spec . If we encouter that then we transform the parameter to ENCODING = b |
46,048 | def validate ( cls , obj , raiseException = False , complainUnrecognized = False ) : if not cls . allowGroup and obj . group is not None : err = "{0} has a group, but this object doesn't support groups" . format ( obj ) raise base . VObjectError ( err ) if isinstance ( obj , base . ContentLine ) : return cls . lineValidate ( obj , raiseException , complainUnrecognized ) elif isinstance ( obj , base . Component ) : count = { } for child in obj . getChildren ( ) : if not child . validate ( raiseException , complainUnrecognized ) : return False name = child . name . upper ( ) count [ name ] = count . get ( name , 0 ) + 1 for key , val in cls . knownChildren . items ( ) : if count . get ( key , 0 ) < val [ 0 ] : if raiseException : m = "{0} components must contain at least {1} {2}" raise base . ValidateError ( m . format ( cls . name , val [ 0 ] , key ) ) return False if val [ 1 ] and count . get ( key , 0 ) > val [ 1 ] : if raiseException : m = "{0} components cannot contain more than {1} {2}" raise base . ValidateError ( m . format ( cls . name , val [ 1 ] , key ) ) return False return True else : err = "{0} is not a Component or Contentline" . format ( obj ) raise base . VObjectError ( err ) | Check if the object satisfies this behavior s requirements . |
46,049 | def pickNthWeekday ( year , month , dayofweek , hour , minute , whichweek ) : first = datetime . datetime ( year = year , month = month , hour = hour , minute = minute , day = 1 ) weekdayone = first . replace ( day = ( ( dayofweek - first . isoweekday ( ) ) % 7 + 1 ) ) for n in xrange ( whichweek - 1 , - 1 , - 1 ) : dt = weekdayone + n * WEEKS if dt . month == month : return dt | dayofweek == 0 means Sunday whichweek > 4 means last instance |
46,050 | def deleteExtraneous ( component , ignore_dtstamp = False ) : for comp in component . components ( ) : deleteExtraneous ( comp , ignore_dtstamp ) for line in component . lines ( ) : if 'X-VOBJ-ORIGINAL-TZID' in line . params : del line . params [ 'X-VOBJ-ORIGINAL-TZID' ] if ignore_dtstamp and hasattr ( component , 'dtstamp_list' ) : del component . dtstamp_list | Recursively walk the component s children deleting extraneous details like X - VOBJ - ORIGINAL - TZID . |
46,051 | def fillPelicanHole ( site , username , password , tstat_name , start_time , end_time ) : start = datetime . strptime ( start_time , _INPUT_TIME_FORMAT ) . replace ( tzinfo = pytz . utc ) . astimezone ( _pelican_time ) end = datetime . strptime ( end_time , _INPUT_TIME_FORMAT ) . replace ( tzinfo = pytz . utc ) . astimezone ( _pelican_time ) heat_needs_fan = _lookupHeatNeedsFan ( site , username , password , tstat_name ) if heat_needs_fan is None : return None history_blocks = [ ] while start < end : block_start = start block_end = min ( start + timedelta ( days = 30 ) , end ) blocks = _lookupHistoricalData ( site , username , password , tstat_name , block_start , block_end ) if blocks is None : return None history_blocks . extend ( blocks ) start += timedelta ( days = 30 , minutes = 1 ) output_rows = [ ] for block in history_blocks : runStatus = block . find ( "runStatus" ) . text if runStatus . startswith ( "Heat" ) : fanState = ( heatNeedsFan == "Yes" ) else : fanState = ( runStatus != "Off" ) api_time = datetime . strptime ( block . find ( "timestamp" ) . text , "%Y-%m-%dT%H:%M" ) . replace ( tzinfo = _pelican_time ) timestamp = int ( api_time . timestamp ( ) * 10 ** 9 ) output_rows . append ( { "temperature" : float ( block . find ( "temperature" ) . text ) , "relative_humidity" : float ( block . find ( "humidity" ) . text ) , "heating_setpoint" : float ( block . find ( "heatSetting" ) . text ) , "cooling_setpoint" : float ( block . find ( "coolSetting" ) . text ) , "override" : block . find ( "setBy" ) . text != "Schedule" , "fan" : fanState , "mode" : _mode_name_mappings [ block . find ( "system" ) . text ] , "state" : _state_mappings . get ( runStatus , 0 ) , "time" : timestamp , } ) df = pd . DataFrame ( output_rows ) df . drop_duplicates ( subset = "time" , keep = "first" , inplace = True ) return df | Fill a hole in a Pelican thermostat s data stream . |
46,052 | def add_degree_days ( self , col = 'OAT' , hdh_cpoint = 65 , cdh_cpoint = 65 ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data data [ 'hdh' ] = data [ col ] over_hdh = data . loc [ : , col ] > hdh_cpoint data . loc [ over_hdh , 'hdh' ] = 0 data . loc [ ~ over_hdh , 'hdh' ] = hdh_cpoint - data . loc [ ~ over_hdh , col ] data [ 'cdh' ] = data [ col ] under_cdh = data . loc [ : , col ] < cdh_cpoint data . loc [ under_cdh , 'cdh' ] = 0 data . loc [ ~ under_cdh , 'cdh' ] = data . loc [ ~ under_cdh , col ] - cdh_cpoint self . preprocessed_data = data | Adds Heating & Cooling Degree Hours . |
46,053 | def add_col_features ( self , col = None , degree = None ) : if not col and not degree : return else : if isinstance ( col , list ) and isinstance ( degree , list ) : if len ( col ) != len ( degree ) : print ( 'col len: ' , len ( col ) ) print ( 'degree len: ' , len ( degree ) ) raise ValueError ( 'col and degree should have equal length.' ) else : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data for i in range ( len ( col ) ) : data . loc [ : , col [ i ] + str ( degree [ i ] ) ] = pow ( data . loc [ : , col [ i ] ] , degree [ i ] ) / pow ( 10 , degree [ i ] - 1 ) self . preprocessed_data = data else : raise TypeError ( 'col and degree should be lists.' ) | Exponentiate columns of dataframe . |
46,054 | def standardize ( self ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data scaler = preprocessing . StandardScaler ( ) data = pd . DataFrame ( scaler . fit_transform ( data ) , columns = data . columns , index = data . index ) self . preprocessed_data = data | Standardize data . |
46,055 | def normalize ( self ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data data = pd . DataFrame ( preprocessing . normalize ( data ) , columns = data . columns , index = data . index ) self . preprocessed_data = data | Normalize data . |
46,056 | def add_time_features ( self , year = False , month = False , week = True , tod = True , dow = True ) : var_to_expand = [ ] if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data if year : data [ "year" ] = data . index . year var_to_expand . append ( "year" ) if month : data [ "month" ] = data . index . month var_to_expand . append ( "month" ) if week : data [ "week" ] = data . index . week var_to_expand . append ( "week" ) if tod : data [ "tod" ] = data . index . hour var_to_expand . append ( "tod" ) if dow : data [ "dow" ] = data . index . weekday var_to_expand . append ( "dow" ) for var in var_to_expand : add_var = pd . get_dummies ( data [ var ] , prefix = var , drop_first = True ) data = data . join ( add_var ) data . drop ( columns = [ var ] , inplace = True ) self . preprocessed_data = data | Add time features to dataframe . |
46,057 | def split_data ( self ) : try : time_period1 = ( slice ( self . baseline_period [ 0 ] , self . baseline_period [ 1 ] ) ) self . baseline_in = self . original_data . loc [ time_period1 , self . input_col ] self . baseline_out = self . original_data . loc [ time_period1 , self . output_col ] if self . exclude_time_period : for i in range ( 0 , len ( self . exclude_time_period ) , 2 ) : exclude_time_period1 = ( slice ( self . exclude_time_period [ i ] , self . exclude_time_period [ i + 1 ] ) ) self . baseline_in . drop ( self . baseline_in . loc [ exclude_time_period1 ] . index , axis = 0 , inplace = True ) self . baseline_out . drop ( self . baseline_out . loc [ exclude_time_period1 ] . index , axis = 0 , inplace = True ) except Exception as e : raise e if self . projection_period : for i in range ( 0 , len ( self . projection_period ) , 2 ) : period = ( slice ( self . projection_period [ i ] , self . projection_period [ i + 1 ] ) ) try : self . original_data . loc [ period , self . input_col ] self . original_data . loc [ period , self . output_col ] except Exception as e : raise e | Split data according to baseline and projection time period values . |
46,058 | def linear_regression ( self ) : model = LinearRegression ( ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = sum ( scores ) / len ( scores ) self . models . append ( model ) self . model_names . append ( 'Linear Regression' ) self . max_scores . append ( mean_score ) self . metrics [ 'Linear Regression' ] = { } self . metrics [ 'Linear Regression' ] [ 'R2' ] = mean_score self . metrics [ 'Linear Regression' ] [ 'Adj R2' ] = self . adj_r2 ( mean_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] ) | Linear Regression . |
46,059 | def lasso_regression ( self ) : score_list = [ ] max_score = float ( '-inf' ) best_alpha = None for alpha in self . alphas : model = Lasso ( alpha = alpha , max_iter = 5000 ) model . fit ( self . baseline_in , self . baseline_out . values . ravel ( ) ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = np . mean ( scores ) score_list . append ( mean_score ) if mean_score > max_score : max_score = mean_score best_alpha = alpha self . models . append ( Lasso ( alpha = best_alpha , max_iter = 5000 ) ) self . model_names . append ( 'Lasso Regression' ) self . max_scores . append ( max_score ) self . metrics [ 'Lasso Regression' ] = { } self . metrics [ 'Lasso Regression' ] [ 'R2' ] = max_score self . metrics [ 'Lasso Regression' ] [ 'Adj R2' ] = self . adj_r2 ( max_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] ) | Lasso Regression . |
46,060 | def random_forest ( self ) : model = RandomForestRegressor ( random_state = 42 ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = np . mean ( scores ) self . models . append ( model ) self . model_names . append ( 'Random Forest Regressor' ) self . max_scores . append ( mean_score ) self . metrics [ 'Random Forest Regressor' ] = { } self . metrics [ 'Random Forest Regressor' ] [ 'R2' ] = mean_score self . metrics [ 'Random Forest Regressor' ] [ 'Adj R2' ] = self . adj_r2 ( mean_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] ) | Random Forest . |
46,061 | def run_models ( self ) : self . linear_regression ( ) self . lasso_regression ( ) self . ridge_regression ( ) self . elastic_net_regression ( ) self . random_forest ( ) self . ann ( ) best_model_index = self . max_scores . index ( max ( self . max_scores ) ) self . best_model_name = self . model_names [ best_model_index ] self . best_model = self . models [ best_model_index ] return self . metrics | Run all models . |
46,062 | def custom_model ( self , func ) : y_pred = func ( self . baseline_in , self . baseline_out ) self . custom_metrics = { } self . custom_metrics [ 'r2' ] = r2_score ( self . baseline_out , y_pred ) self . custom_metrics [ 'mse' ] = mean_squared_error ( self . baseline_out , y_pred ) self . custom_metrics [ 'rmse' ] = math . sqrt ( self . custom_metrics [ 'mse' ] ) self . custom_metrics [ 'adj_r2' ] = self . adj_r2 ( self . custom_metrics [ 'r2' ] , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] ) return self . custom_metrics | Run custom model provided by user . |
46,063 | def best_model_fit ( self ) : self . best_model . fit ( self . baseline_in , self . baseline_out ) self . y_true = self . baseline_out self . y_pred = self . best_model . predict ( self . baseline_in ) self . y_pred [ self . y_pred < 0 ] = 0 self . n_test = self . baseline_in . shape [ 0 ] self . k_test = self . baseline_in . shape [ 1 ] self . best_metrics [ 'name' ] = self . best_model_name self . best_metrics [ 'r2' ] = r2_score ( self . y_true , self . y_pred ) self . best_metrics [ 'mse' ] = mean_squared_error ( self . y_true , self . y_pred ) self . best_metrics [ 'rmse' ] = math . sqrt ( self . best_metrics [ 'mse' ] ) self . best_metrics [ 'adj_r2' ] = self . adj_r2 ( self . best_metrics [ 'r2' ] , self . n_test , self . k_test ) numerator = sum ( self . y_true - self . y_pred ) denominator = ( self . n_test - self . k_test ) * ( sum ( self . y_true ) / len ( self . y_true ) ) self . best_metrics [ 'nmbe' ] = numerator / denominator self . baseline_out_copy = self . baseline_out [ self . baseline_out != 0 ] self . baseline_in_copy = self . baseline_in [ self . baseline_in . index . isin ( self . baseline_out_copy . index ) ] self . y_true_copy = self . baseline_out_copy self . y_pred_copy = self . best_model . predict ( self . baseline_in_copy ) self . best_metrics [ 'mape' ] = np . mean ( np . abs ( ( self . y_true_copy - self . y_pred_copy ) / self . y_true_copy ) ) * 100 return self . best_metrics | Fit data to optimal model and return its metrics . |
46,064 | def correlation_plot ( self , data ) : fig = plt . figure ( Plot_Data . count ) corr = data . corr ( ) ax = sns . heatmap ( corr ) Plot_Data . count += 1 return fig | Create heatmap of Pearson s correlation coefficient . |
46,065 | def baseline_projection_plot ( self , y_true , y_pred , baseline_period , projection_period , model_name , adj_r2 , data , input_col , output_col , model , site ) : fig = plt . figure ( Plot_Data . count ) if projection_period : nrows = len ( baseline_period ) + len ( projection_period ) / 2 else : nrows = len ( baseline_period ) / 2 base_df = pd . DataFrame ( ) base_df [ 'y_true' ] = y_true base_df [ 'y_pred' ] = y_pred ax1 = fig . add_subplot ( nrows , 1 , 1 ) base_df . plot ( ax = ax1 , figsize = self . figsize , title = 'Baseline Period ({}-{}). \nBest Model: {}. \nBaseline Adj R2: {}. \nSite: {}.' . format ( baseline_period [ 0 ] , baseline_period [ 1 ] , model_name , adj_r2 , site ) ) if projection_period : num_plot = 2 for i in range ( 0 , len ( projection_period ) , 2 ) : ax = fig . add_subplot ( nrows , 1 , num_plot ) period = ( slice ( projection_period [ i ] , projection_period [ i + 1 ] ) ) project_df = pd . DataFrame ( ) try : project_df [ 'y_true' ] = data . loc [ period , output_col ] project_df [ 'y_pred' ] = model . predict ( data . loc [ period , input_col ] ) project_df [ 'y_pred' ] [ project_df [ 'y_pred' ] < 0 ] = 0 project_df . plot ( ax = ax , figsize = self . figsize , title = 'Projection Period ({}-{})' . format ( projection_period [ i ] , projection_period [ i + 1 ] ) ) num_plot += 1 fig . tight_layout ( ) Plot_Data . count += 1 return fig , project_df [ 'y_true' ] , project_df [ 'y_pred' ] except : raise TypeError ( "If projecting into the future, please specify project_ind_col that has data available \ in the future time period requested." ) return fig , None , None | Create baseline and projection plots . |
46,066 | def get_thermostat_meter_data ( zone ) : meter_uri = zone2meter . get ( zone , "None" ) data = [ ] def cb ( msg ) : for po in msg . payload_objects : if po . type_dotted == ( 2 , 0 , 9 , 1 ) : m = msgpack . unpackb ( po . content ) data . append ( m [ 'current_demand' ] ) handle = c . subscribe ( meter_uri + "/signal/meter" , cb ) def stop ( ) : c . unsubscribe ( handle ) return data return stop | This method subscribes to the output of the meter for the given zone . It returns a handler to call when you want to stop subscribing data which returns a list of the data readins over that time period |
46,067 | def call_heat ( tstat ) : current_hsp , current_csp = tstat . heating_setpoint , tstat . cooling_setpoint current_temp = tstat . temperature tstat . write ( { 'heating_setpoint' : current_temp + 10 , 'cooling_setpoint' : current_temp + 20 , 'mode' : HEAT , } ) def restore ( ) : tstat . write ( { 'heating_setpoint' : current_hsp , 'cooling_setpoint' : current_csp , 'mode' : AUTO , } ) return restore | Adjusts the temperature setpoints in order to call for heating . Returns a handler to call when you want to reset the thermostat |
46,068 | def call_cool ( tstat ) : current_hsp , current_csp = tstat . heating_setpoint , tstat . cooling_setpoint current_temp = tstat . temperature tstat . write ( { 'heating_setpoint' : current_temp - 20 , 'cooling_setpoint' : current_temp - 10 , 'mode' : COOL , } ) def restore ( ) : tstat . write ( { 'heating_setpoint' : current_hsp , 'cooling_setpoint' : current_csp , 'mode' : AUTO , } ) return restore | Adjusts the temperature setpoints in order to call for cooling . Returns a handler to call when you want to reset the thermostat |
46,069 | def call_fan ( tstat ) : old_fan = tstat . fan tstat . write ( { 'fan' : not old_fan , } ) def restore ( ) : tstat . write ( { 'fan' : old_fan , } ) return restore | Toggles the fan |
46,070 | def _load_csv ( self , file_name , folder_name , head_row , index_col , convert_col , concat_files ) : if file_name == "*" : if not os . path . isdir ( folder_name ) : raise OSError ( 'Folder does not exist.' ) else : file_name_list = sorted ( glob . glob ( folder_name + '*.csv' ) ) if not file_name_list : raise OSError ( 'Either the folder does not contain any csv files or invalid folder provided.' ) else : self . import_csv ( file_name = file_name_list , head_row = head_row , index_col = index_col , convert_col = convert_col , concat_files = concat_files ) return self . data else : if not os . path . isdir ( folder_name ) : raise OSError ( 'Folder does not exist.' ) else : path = os . path . join ( folder_name , file_name ) if head_row > 0 : data = pd . read_csv ( path , index_col = index_col , skiprows = [ i for i in range ( head_row - 1 ) ] ) else : data = pd . read_csv ( path , index_col = index_col ) try : data . index = pd . to_datetime ( data . index , format = '%m/%d/%y %H:%M' ) except : data . index = pd . to_datetime ( data . index , dayfirst = False , infer_datetime_format = True ) if convert_col : for col in data . columns : if data [ col ] . dtype != np . number : data [ col ] = pd . to_numeric ( data [ col ] , errors = "coerce" ) return data | Load single csv file . |
46,071 | def convert_to_utc ( time ) : if 'Z' in time : return time else : time_formatted = time [ : - 3 ] + time [ - 2 : ] dt = datetime . strptime ( time_formatted , '%Y-%m-%dT%H:%M:%S%z' ) dt = dt . astimezone ( timezone ( 'UTC' ) ) return dt . strftime ( '%Y-%m-%dT%H:%M:%SZ' ) | Convert time to UTC |
46,072 | def get_meter ( self , site , start , end , point_type = 'Green_Button_Meter' , var = "meter" , agg = 'MEAN' , window = '24h' , aligned = True , return_names = True ) : start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) request = self . compose_MDAL_dic ( point_type = point_type , site = site , start = start , end = end , var = var , agg = agg , window = window , aligned = aligned ) resp = self . m . query ( request ) if return_names : resp = self . replace_uuid_w_names ( resp ) return resp | Get meter data from MDAL . |
46,073 | def get_tstat ( self , site , start , end , var = "tstat_temp" , agg = 'MEAN' , window = '24h' , aligned = True , return_names = True ) : start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) point_map = { "tstat_state" : "Thermostat_Status" , "tstat_hsp" : "Supply_Air_Temperature_Heating_Setpoint" , "tstat_csp" : "Supply_Air_Temperature_Cooling_Setpoint" , "tstat_temp" : "Temperature_Sensor" } if isinstance ( var , list ) : point_type = [ point_map [ point_type ] for point_type in var ] else : point_type = point_map [ var ] request = self . compose_MDAL_dic ( point_type = point_type , site = site , start = start , end = end , var = var , agg = agg , window = window , aligned = aligned ) resp = self . m . query ( request ) if return_names : resp = self . replace_uuid_w_names ( resp ) return resp | Get thermostat data from MDAL . |
46,074 | def compose_MDAL_dic ( self , site , point_type , start , end , var , agg , window , aligned , points = None , return_names = False ) : start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) request = { } request [ 'Time' ] = { 'Start' : start , 'End' : end , 'Window' : window , 'Aligned' : aligned } request [ "Variables" ] = { } request [ 'Composition' ] = [ ] request [ 'Aggregation' ] = { } if isinstance ( point_type , str ) : request [ "Variables" ] [ var ] = self . compose_BRICK_query ( point_type = point_type , site = site ) request [ 'Composition' ] = [ var ] request [ 'Aggregation' ] [ var ] = [ agg ] elif isinstance ( point_type , list ) : for idx , point in enumerate ( point_type ) : request [ "Variables" ] [ var [ idx ] ] = self . compose_BRICK_query ( point_type = point , site = site ) request [ 'Composition' ] . append ( var [ idx ] ) if isinstance ( agg , str ) : request [ 'Aggregation' ] [ var [ idx ] ] = [ agg ] elif isinstance ( agg , list ) : request [ 'Aggregation' ] [ var [ idx ] ] = [ agg [ idx ] ] return request | Create dictionary for MDAL request . |
46,075 | def get_point_name ( self , context ) : metadata_table = self . parse_context ( context ) return metadata_table . apply ( self . strip_point_name , axis = 1 ) | Get point name . |
46,076 | def replace_uuid_w_names ( self , resp ) : col_mapper = self . get_point_name ( resp . context ) [ "?point" ] . to_dict ( ) resp . df . rename ( columns = col_mapper , inplace = True ) return resp | Replace the uuid s with names . |
46,077 | def resample_data ( self , data , freq , resampler = 'mean' ) : if resampler == 'mean' : data = data . resample ( freq ) . mean ( ) elif resampler == 'max' : data = data . resample ( freq ) . max ( ) else : raise ValueError ( 'Resampler can be \'mean\' or \'max\' only.' ) return data | Resample dataframe . |
46,078 | def interpolate_data ( self , data , limit , method ) : data = data . interpolate ( how = "index" , limit = limit , method = method ) return data | Interpolate dataframe . |
46,079 | def remove_na ( self , data , remove_na_how ) : data = data . dropna ( how = remove_na_how ) return data | Remove NAs from dataframe . |
46,080 | def remove_outlier ( self , data , sd_val ) : data = data . dropna ( ) data = data [ ( np . abs ( stats . zscore ( data ) ) < float ( sd_val ) ) . all ( axis = 1 ) ] return data | Remove outliers from dataframe . |
46,081 | def remove_out_of_bounds ( self , data , low_bound , high_bound ) : data = data . dropna ( ) data = data [ ( data > low_bound ) . all ( axis = 1 ) & ( data < high_bound ) . all ( axis = 1 ) ] return data | Remove out of bound datapoints from dataframe . |
46,082 | def _set_TS_index ( self , data ) : data . index = pd . to_datetime ( data . index , error = "ignore" ) for col in data . columns : data [ col ] = pd . to_numeric ( data [ col ] , errors = "coerce" ) return data | Convert index to datetime and all other columns to numeric |
46,083 | def _utc_to_local ( self , data , local_zone = "America/Los_Angeles" ) : data . index = data . index . tz_localize ( pytz . utc ) . tz_convert ( local_zone ) data . index = data . index . tz_localize ( None ) return data | Adjust index of dataframe according to timezone that is requested by user . |
46,084 | def _local_to_utc ( self , timestamp , local_zone = "America/Los_Angeles" ) : timestamp_new = pd . to_datetime ( timestamp , infer_datetime_format = True , errors = 'coerce' ) timestamp_new = timestamp_new . tz_localize ( local_zone ) . tz_convert ( pytz . utc ) timestamp_new = timestamp_new . strftime ( '%Y-%m-%d %H:%M:%S' ) return timestamp_new | Convert local timestamp to UTC . |
46,085 | def find_uuid ( self , obj , column_name ) : keys = obj . context . keys ( ) for i in keys : if column_name in obj . context [ i ] [ '?point' ] : uuid = i return i | Find uuid . |
46,086 | def identify_missing ( self , df , check_start = True ) : data_missing = df . isnull ( ) * 1 col_name = str ( data_missing . columns [ 0 ] ) if check_start & data_missing [ col_name ] [ 0 ] == 1 : data_missing [ col_name ] [ 0 ] = 0 return data_missing , col_name | Identify missing data . |
46,087 | def diff_boolean ( self , df , column_name = None , uuid = None , duration = True , min_event_filter = '3 hours' ) : if uuid == None : uuid = 'End' data_gaps = df [ ( df . diff ( ) == 1 ) | ( df . diff ( ) == - 1 ) ] . dropna ( ) data_gaps [ "duration" ] = abs ( data_gaps . index . to_series ( ) . diff ( periods = - 1 ) ) data_gaps [ uuid ] = data_gaps . index + ( data_gaps [ "duration" ] ) data_gaps = data_gaps [ data_gaps [ "duration" ] > pd . Timedelta ( min_event_filter ) ] data_gaps = data_gaps [ data_gaps [ column_name ] == 1 ] data_gaps . pop ( column_name ) if not duration : data_gaps . pop ( 'duration' ) data_gaps . index = data_gaps . index . strftime ( date_format = "%Y-%m-%d %H:%M:%S" ) data_gaps [ uuid ] = data_gaps [ uuid ] . dt . strftime ( date_format = "%Y-%m-%d %H:%M:%S" ) return data_gaps | takes the dataframe of missing values and returns a dataframe that indicates the length of each event where data was continuously missing |
46,088 | def analyze_quality_table ( self , obj , low_bound = None , high_bound = None ) : data = obj . df N_rows = 3 N_cols = data . shape [ 1 ] d = pd . DataFrame ( np . zeros ( ( N_rows , N_cols ) ) , index = [ '% Missing' , 'AVG Length Missing' , 'Std dev. Missing' ] , columns = [ data . columns ] ) if low_bound : data = data . where ( data >= low_bound ) if high_bound : data = data . where ( data < high_bound ) for i in range ( N_cols ) : data_per_meter = data . iloc [ : , [ i ] ] data_missing , meter = self . identify_missing ( data_per_meter ) percentage = data_missing . sum ( ) / ( data . shape [ 0 ] ) * 100 data_gaps = self . diff_boolean ( data_missing , column_name = meter ) missing_mean = data_gaps . mean ( ) std_dev = data_gaps . std ( ) d . loc [ "% Missing" , meter ] = percentage [ meter ] d . loc [ "AVG Length Missing" , meter ] = missing_mean [ 'duration' ] d . loc [ "Std dev. Missing" , meter ] = std_dev [ 'duration' ] return d | Takes in an the object returned by the MDAL query and analyzes the quality of the data for each column in the df . Returns a df of data quality metrics |
46,089 | def analyze_quality_graph ( self , obj ) : data = obj . df for i in range ( data . shape [ 1 ] ) : data_per_meter = data . iloc [ : , [ i ] ] data_missing , meter = self . identify_missing ( data_per_meter ) percentage = data_missing . sum ( ) / ( data . shape [ 0 ] ) * 100 print ( 'Percentage Missing of ' + meter + ' data: ' + str ( int ( percentage ) ) + '%' ) data_missing . plot ( figsize = ( 18 , 5 ) , x_compat = True , title = meter + " Missing Data over the Time interval" ) data_gaps = self . diff_boolean ( data_missing , column_name = meter ) data_missing [ 'Hour' ] = data_missing . index . hour ymax = int ( data_missing . groupby ( 'Hour' ) . sum ( ) . max ( ) + 10 ) data_missing . groupby ( 'Hour' ) . sum ( ) . plot ( ylim = ( 0 , ymax ) , figsize = ( 18 , 5 ) , title = meter + " Time of Day of Missing Data" ) print ( data_gaps ) | Takes in an the object returned by the MDAL query and analyzes the quality of the data for each column in the df in the form of graphs . The Graphs returned show missing data events over time and missing data frequency during each hour of the day |
46,090 | def clean_data ( self , resample = True , freq = 'h' , resampler = 'mean' , interpolate = True , limit = 1 , method = 'linear' , remove_na = True , remove_na_how = 'any' , remove_outliers = True , sd_val = 3 , remove_out_of_bounds = True , low_bound = 0 , high_bound = 9998 ) : data = self . original_data if resample : try : data = self . resample_data ( data , freq , resampler ) except Exception as e : raise e if interpolate : try : data = self . interpolate_data ( data , limit = limit , method = method ) except Exception as e : raise e if remove_na : try : data = self . remove_na ( data , remove_na_how ) except Exception as e : raise e if remove_outliers : try : data = self . remove_outliers ( data , sd_val ) except Exception as e : raise e if remove_out_of_bounds : try : data = self . remove_out_of_bounds ( data , low_bound , high_bound ) except Exception as e : raise e self . cleaned_data = data | Clean dataframe . |
46,091 | def write_json ( self ) : with open ( self . results_folder_name + '/results-' + str ( self . get_global_count ( ) ) + '.json' , 'a' ) as f : json . dump ( self . result , f ) | Dump data into json file . |
46,092 | def site_analysis ( self , folder_name , site_install_mapping , end_date ) : def count_number_of_days ( site , end_date ) : start_date = site_install_mapping [ site ] start_date = start_date . split ( '-' ) start = date ( int ( start_date [ 0 ] ) , int ( start_date [ 1 ] ) , int ( start_date [ 2 ] ) ) end_date = end_date . split ( '-' ) end = date ( int ( end_date [ 0 ] ) , int ( end_date [ 1 ] ) , int ( end_date [ 2 ] ) ) delta = end - start return delta . days if not folder_name or not isinstance ( folder_name , str ) : raise TypeError ( "folder_name should be type string" ) else : list_json_files = [ ] df = pd . DataFrame ( ) temp_df = pd . DataFrame ( ) json_files = [ f for f in os . listdir ( folder_name ) if f . endswith ( '.json' ) ] for json_file in json_files : with open ( folder_name + json_file ) as f : js = json . load ( f ) num_days = count_number_of_days ( js [ 'Site' ] , end_date ) e_abs_sav = round ( js [ 'Energy Savings (absolute)' ] / 1000 , 2 ) e_perc_sav = round ( js [ 'Energy Savings (%)' ] , 2 ) ann_e_abs_sav = ( e_abs_sav / num_days ) * 365 d_abs_sav = round ( js [ 'User Comments' ] [ 'Dollar Savings (absolute)' ] , 2 ) d_perc_sav = round ( js [ 'User Comments' ] [ 'Dollar Savings (%)' ] , 2 ) ann_d_abs_sav = ( d_abs_sav / num_days ) * 365 temp_df = pd . DataFrame ( { 'Site' : js [ 'Site' ] , '#Days since Pelican Installation' : num_days , 'Energy Savings (%)' : e_perc_sav , 'Energy Savings (kWh)' : e_abs_sav , 'Annualized Energy Savings (kWh)' : ann_e_abs_sav , 'Dollar Savings (%)' : d_perc_sav , 'Dollar Savings ($)' : d_abs_sav , 'Annualized Dollar Savings ($)' : ann_d_abs_sav , 'Best Model' : js [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'name' ] , 'Adj R2' : round ( js [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'adj_cross_val_score' ] , 2 ) , 'RMSE' : round ( js [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'rmse' ] , 2 ) , 'MAPE' : js [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'mape' ] , 'Uncertainity' : js [ 'Uncertainity' ] , } , index = [ 0 ] ) df = df . append ( temp_df ) df . set_index ( 'Site' , inplace = True ) return df | Summarize site data into a single table . |
46,093 | def search ( self , file_name , imported_data = None ) : resample_freq = [ '15T' , 'h' , 'd' ] time_freq = { 'year' : [ True , False , False , False , False ] , 'month' : [ False , True , False , False , False ] , 'week' : [ False , False , True , False , False ] , 'tod' : [ False , False , False , True , False ] , 'dow' : [ False , False , False , False , True ] , } optimal_score = float ( '-inf' ) optimal_model = None if not imported_data : with open ( file_name ) as f : input_json = json . load ( f ) import_json = input_json [ 'Import' ] imported_data = self . import_data ( file_name = import_json [ 'File Name' ] , folder_name = import_json [ 'Folder Name' ] , head_row = import_json [ 'Head Row' ] , index_col = import_json [ 'Index Col' ] , convert_col = import_json [ 'Convert Col' ] , concat_files = import_json [ 'Concat Files' ] , save_file = import_json [ 'Save File' ] ) with open ( file_name ) as f : input_json = json . load ( f ) for x in resample_freq : input_json [ 'Clean' ] [ 'Frequency' ] = x for i in range ( len ( time_freq . items ( ) ) ) : input_json [ 'Preprocess' ] [ 'Year' ] = time_freq [ 'year' ] [ i ] input_json [ 'Preprocess' ] [ 'Month' ] = time_freq [ 'month' ] [ i ] input_json [ 'Preprocess' ] [ 'Week' ] = time_freq [ 'week' ] [ i ] input_json [ 'Preprocess' ] [ 'Time of Day' ] = time_freq [ 'tod' ] [ i ] input_json [ 'Preprocess' ] [ 'Day of Week' ] = time_freq [ 'dow' ] [ i ] time_feature = None for key in time_freq : if time_freq [ key ] [ i ] : time_feature = key self . result [ 'Comment' ] = 'Freq: ' + x + ', ' + 'Time Feature: ' + time_feature self . read_json ( file_name = None , input_json = input_json , imported_data = imported_data ) if self . result [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'adj_r2' ] > optimal_score : optimal_score = self . result [ 'Model' ] [ 'Optimal Model\'s Metrics' ] [ 'adj_r2' ] optimal_model_file_name = self . results_folder_name + '/results-' + str ( self . get_global_count ( ) ) + '.json' print ( 'Most optimal model: ' , optimal_model_file_name ) freq = self . result [ 'Comment' ] . split ( ' ' ) [ 1 ] [ : - 1 ] time_feat = self . result [ 'Comment' ] . split ( ' ' ) [ - 1 ] print ( 'Freq: ' , freq , 'Time Feature: ' , time_feat ) | Run models on different data configurations . |
46,094 | def clean_data ( self , data , rename_col = None , drop_col = None , resample = True , freq = 'h' , resampler = 'mean' , interpolate = True , limit = 1 , method = 'linear' , remove_na = True , remove_na_how = 'any' , remove_outliers = True , sd_val = 3 , remove_out_of_bounds = True , low_bound = 0 , high_bound = float ( 'inf' ) , save_file = True ) : if not isinstance ( data , pd . DataFrame ) : raise TypeError ( 'data has to be a pandas dataframe.' ) clean_data_obj = Clean_Data ( data ) clean_data_obj . clean_data ( resample = resample , freq = freq , resampler = resampler , interpolate = interpolate , limit = limit , method = method , remove_na = remove_na , remove_na_how = remove_na_how , remove_outliers = remove_outliers , sd_val = sd_val , remove_out_of_bounds = remove_out_of_bounds , low_bound = low_bound , high_bound = high_bound ) if rename_col : clean_data_obj . rename_columns ( rename_col ) if drop_col : clean_data_obj . drop_columns ( drop_col ) self . cleaned_data = clean_data_obj . cleaned_data self . result [ 'Clean' ] = { 'Rename Col' : rename_col , 'Drop Col' : drop_col , 'Resample' : resample , 'Frequency' : freq , 'Resampler' : resampler , 'Interpolate' : interpolate , 'Limit' : limit , 'Method' : method , 'Remove NA' : remove_na , 'Remove NA How' : remove_na_how , 'Remove Outliers' : remove_outliers , 'SD Val' : sd_val , 'Remove Out of Bounds' : remove_out_of_bounds , 'Low Bound' : low_bound , 'High Bound' : str ( high_bound ) if high_bound == float ( 'inf' ) else high_bound , 'Save File' : save_file } if save_file : f = self . results_folder_name + '/cleaned_data-' + str ( self . get_global_count ( ) ) + '.csv' self . cleaned_data . to_csv ( f ) self . result [ 'Clean' ] [ 'Saved File' ] = f else : self . result [ 'Clean' ] [ 'Saved File' ] = '' return self . cleaned_data | Cleans dataframe according to user specifications and stores result in self . cleaned_data . |
46,095 | def prevmonday ( num ) : today = get_today ( ) lastmonday = today - timedelta ( days = today . weekday ( ) , weeks = num ) return lastmonday | Return unix SECOND timestamp of num mondays ago |
46,096 | def med_filt ( x , k = 201 ) : if x . ndim > 1 : x = np . squeeze ( x ) med = np . median ( x ) assert k % 2 == 1 , "Median filter length must be odd." assert x . ndim == 1 , "Input must be one-dimensional." k2 = ( k - 1 ) // 2 y = np . zeros ( ( len ( x ) , k ) , dtype = x . dtype ) y [ : , k2 ] = x for i in range ( k2 ) : j = k2 - i y [ j : , i ] = x [ : - j ] y [ : j , i ] = x [ 0 ] y [ : - j , - ( i + 1 ) ] = x [ j : ] y [ - j : , - ( i + 1 ) ] = med return np . median ( y , axis = 1 ) | Apply a length - k median filter to a 1D array x . Boundaries are extended by repeating endpoints . |
46,097 | def preprocess_data ( self , data , hdh_cpoint = 65 , cdh_cpoint = 65 , col_hdh_cdh = None , col_degree = None , degree = None , standardize = False , normalize = False , year = False , month = False , week = False , tod = False , dow = False , save_file = True ) : if not isinstance ( data , pd . DataFrame ) : raise SystemError ( 'data has to be a pandas dataframe.' ) preprocess_data_obj = Preprocess_Data ( data ) if col_hdh_cdh : preprocess_data_obj . add_degree_days ( col = col_hdh_cdh , hdh_cpoint = hdh_cpoint , cdh_cpoint = cdh_cpoint ) preprocess_data_obj . add_col_features ( col = col_degree , degree = degree ) if standardize : preprocess_data_obj . standardize ( ) if normalize : preprocess_data_obj . normalize ( ) preprocess_data_obj . add_time_features ( year = year , month = month , week = week , tod = tod , dow = dow ) self . preprocessed_data = preprocess_data_obj . preprocessed_data self . result [ 'Preprocess' ] = { 'HDH CPoint' : hdh_cpoint , 'CDH CPoint' : cdh_cpoint , 'HDH CDH Calc Col' : col_hdh_cdh , 'Col Degree' : col_degree , 'Degree' : degree , 'Standardize' : standardize , 'Normalize' : normalize , 'Year' : year , 'Month' : month , 'Week' : week , 'Time of Day' : tod , 'Day of Week' : dow , 'Save File' : save_file } if save_file : f = self . results_folder_name + '/preprocessed_data-' + str ( self . get_global_count ( ) ) + '.csv' self . preprocessed_data . to_csv ( f ) self . result [ 'Preprocess' ] [ 'Saved File' ] = f else : self . result [ 'Preprocess' ] [ 'Saved File' ] = '' return self . preprocessed_data | Preprocesses dataframe according to user specifications and stores result in self . preprocessed_data . |
46,098 | def model ( self , data , ind_col = None , dep_col = None , project_ind_col = None , baseline_period = [ None , None ] , projection_period = None , exclude_time_period = None , alphas = np . logspace ( - 4 , 1 , 30 ) , cv = 3 , plot = True , figsize = None , custom_model_func = None ) : if not isinstance ( data , pd . DataFrame ) : raise SystemError ( 'data has to be a pandas dataframe.' ) model_data_obj = Model_Data ( data , ind_col , dep_col , alphas , cv , exclude_time_period , baseline_period , projection_period ) model_data_obj . split_data ( ) self . result [ 'Model' ] = { 'Independent Col' : ind_col , 'Dependent Col' : dep_col , 'Projection Independent Col' : project_ind_col , 'Baseline Period' : baseline_period , 'Projection Period' : projection_period , 'Exclude Time Period' : exclude_time_period , 'Alphas' : list ( alphas ) , 'CV' : cv , 'Plot' : plot , 'Fig Size' : figsize } all_metrics = model_data_obj . run_models ( ) self . result [ 'Model' ] [ 'All Model\'s Metrics' ] = all_metrics if custom_model_func : self . result [ 'Model' ] [ 'Custom Model\'s Metrics' ] = model_data_obj . custom_model ( custom_model_func ) self . result [ 'Model' ] [ 'Optimal Model\'s Metrics' ] = model_data_obj . best_model_fit ( ) if plot : input_col = model_data_obj . input_col if not project_ind_col else project_ind_col fig , y_true , y_pred = self . plot_data_obj . baseline_projection_plot ( model_data_obj . y_true , model_data_obj . y_pred , model_data_obj . baseline_period , model_data_obj . projection_period , model_data_obj . best_model_name , model_data_obj . best_metrics [ 'adj_r2' ] , model_data_obj . original_data , input_col , model_data_obj . output_col , model_data_obj . best_model , self . result [ 'Site' ] ) fig . savefig ( self . results_folder_name + '/baseline_projection_plot-' + str ( self . get_global_count ( ) ) + '.png' ) if not y_true . empty and not y_pred . empty : saving_absolute = ( y_pred - y_true ) . sum ( ) saving_perc = ( saving_absolute / y_pred . sum ( ) ) * 100 self . result [ 'Energy Savings (%)' ] = float ( saving_perc ) self . result [ 'Energy Savings (absolute)' ] = saving_absolute self . project_df [ 'true' ] = y_true self . project_df [ 'pred' ] = y_pred self . result [ 'Uncertainity' ] = self . uncertainity_equation ( model_data_obj , y_true , y_pred , 0.9 ) else : print ( 'y_true: ' , y_true ) print ( 'y_pred: ' , y_pred ) print ( 'Error: y_true and y_pred are empty. Default to -1.0 savings.' ) self . result [ 'Energy Savings (%)' ] = float ( - 1.0 ) self . result [ 'Energy Savings (absolute)' ] = float ( - 1.0 ) return self . best_metrics | Split data into baseline and projection periods run models on them and display metrics & plots . |
46,099 | def make_dataframe ( result ) : import pandas as pd ret = { } if isinstance ( result , dict ) : if 'timeseries' in result : result = result [ 'timeseries' ] for uuid , data in result . items ( ) : df = pd . DataFrame ( data ) if len ( df . columns ) == 5 : df . columns = [ 'time' , 'min' , 'mean' , 'max' , 'count' ] else : df . columns = [ 'time' , 'value' ] df [ 'time' ] = pd . to_datetime ( df [ 'time' ] , unit = 'ns' ) df = df . set_index ( df . pop ( 'time' ) ) ret [ uuid ] = df return ret | Turns the results of one of the data API calls into a pandas dataframe |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.