idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
233,900
def getParameters ( self ) : parameters = lock_and_call ( lambda : self . _impl . getParameters ( ) , self . _lock ) return EntityMap ( parameters , Parameter )
Get all the parameters declared .
41
6
233,901
def getCurrentObjective ( self ) : name = self . _impl . getCurrentObjectiveName ( ) if name == '' : return None else : return self . getObjective ( name )
Get the the current objective . Returns None if no objective is set .
41
14
233,902
def _obj ( self ) : class Objectives ( object ) : def __getitem__ ( _self , name ) : return self . getObjective ( name ) def __iter__ ( _self ) : return self . getObjectives ( ) return Objectives ( )
Get an objective .
57
4
233,903
def exportData ( self , datfile ) : def ampl_set ( name , values ) : def format_entry ( e ) : return repr ( e ) . replace ( ' ' , '' ) return 'set {0} := {1};' . format ( name , ',' . join ( format_entry ( e ) for e in values ) ) def ampl_param ( name , values ) : def format_entry ( k , v ) : k = repr ( k ) . strip ( '()' ) . replace ( ' ' , '' ) if v == inf : v = "Infinity" elif v == - inf : v = "-Infinity" else : v = repr ( v ) . strip ( '()' ) . replace ( ' ' , '' ) return '[{0}]{1}' . format ( k , v ) return 'param {0} := {1};' . format ( name , '' . join ( format_entry ( k , v ) for k , v in values . items ( ) ) ) with open ( datfile , 'w' ) as f : for name , entity in self . getSets ( ) : values = entity . getValues ( ) . toList ( ) print ( ampl_set ( name , values ) , file = f ) for name , entity in self . getParameters ( ) : if entity . isScalar ( ) : print ( 'param {} := {};' . format ( name , entity . value ( ) ) , file = f ) else : values = entity . getValues ( ) . toDict ( ) print ( ampl_param ( name , values ) , file = f )
Create a . dat file with the data that has been loaded .
351
13
233,904
def importGurobiSolution ( self , grbmodel ) : self . eval ( '' . join ( 'let {} := {};' . format ( var . VarName , var . X ) for var in grbmodel . getVars ( ) if '$' not in var . VarName ) )
Import the solution from a gurobipy . Model object .
65
13
233,905
def _startRecording ( self , filename ) : self . setOption ( '_log_file_name' , filename ) self . setOption ( '_log_input_only' , True ) self . setOption ( '_log' , True )
Start recording the session to a file for debug purposes .
55
11
233,906
def _loadSession ( self , filename ) : try : self . eval ( open ( filename ) . read ( ) ) except RuntimeError as e : print ( e )
Load a recorded session .
35
5
233,907
def ls_dir ( base_dir ) : return [ os . path . join ( dirpath . replace ( base_dir , '' , 1 ) , f ) for ( dirpath , dirnames , files ) in os . walk ( base_dir ) for f in files ]
List files recursively .
58
6
233,908
def get ( self , * index ) : assert self . wrapFunction is not None if len ( index ) == 1 and isinstance ( index [ 0 ] , ( tuple , list ) ) : index = index [ 0 ] if len ( index ) == 0 : return self . wrapFunction ( self . _impl . get ( ) ) else : return self . wrapFunction ( self . _impl . get ( Tuple ( index ) . _impl ) )
Get the instance with the specified index .
94
8
233,909
def find ( self , * index ) : assert self . wrapFunction is not None if len ( index ) == 1 and isinstance ( index [ 0 ] , ( tuple , list ) ) : index = index [ 0 ] it = self . _impl . find ( Tuple ( index ) . _impl ) if it == self . _impl . end ( ) : return None else : return self . wrapFunction ( it )
Searches the current entity for an instance with the specified index .
88
14
233,910
def addRow ( self , * value ) : if len ( value ) == 1 and isinstance ( value [ 0 ] , ( tuple , list ) ) : value = value [ 0 ] assert len ( value ) == self . getNumCols ( ) self . _impl . addRow ( Tuple ( value ) . _impl )
Add a row to the DataFrame . The size of the tuple must be equal to the total number of columns in the dataframe .
70
27
233,911
def addColumn ( self , header , values = [ ] ) : if len ( values ) == 0 : self . _impl . addColumn ( header ) else : assert len ( values ) == self . getNumRows ( ) if any ( isinstance ( value , basestring ) for value in values ) : values = list ( map ( str , values ) ) self . _impl . addColumnStr ( header , values ) elif all ( isinstance ( value , Real ) for value in values ) : values = list ( map ( float , values ) ) self . _impl . addColumnDbl ( header , values ) else : raise NotImplementedError
Add a new column with the corresponding header and values to the dataframe .
140
15
233,912
def setColumn ( self , header , values ) : if any ( isinstance ( value , basestring ) for value in values ) : values = list ( map ( str , values ) ) self . _impl . setColumnStr ( header , values , len ( values ) ) elif all ( isinstance ( value , Real ) for value in values ) : values = list ( map ( float , values ) ) self . _impl . setColumnDbl ( header , values , len ( values ) ) else : print ( values ) raise NotImplementedError
Set the values of a column .
117
7
233,913
def getRow ( self , key ) : return Row ( self . _impl . getRow ( Tuple ( key ) . _impl ) )
Get a row by value of the indexing columns . If the index is not specified gets the only row of a dataframe with no indexing columns .
30
31
233,914
def getRowByIndex ( self , index ) : assert isinstance ( index , int ) return Row ( self . _impl . getRowByIndex ( index ) )
Get row by numeric index .
35
6
233,915
def getHeaders ( self ) : headers = self . _impl . getHeaders ( ) return tuple ( headers . getIndex ( i ) for i in range ( self . _impl . getNumCols ( ) ) )
Get the headers of this DataFrame .
48
8
233,916
def setValues ( self , values ) : ncols = self . getNumCols ( ) nindices = self . getNumIndices ( ) for key , value in values . items ( ) : key = Utils . convToList ( key ) assert len ( key ) == nindices value = Utils . convToList ( value ) assert len ( value ) == ncols - nindices self . addRow ( key + value )
Set the values of a DataFrame from a dictionary .
97
11
233,917
def toDict ( self ) : d = { } nindices = self . getNumIndices ( ) for i in range ( self . getNumRows ( ) ) : row = list ( self . getRowByIndex ( i ) ) if nindices > 1 : key = tuple ( row [ : nindices ] ) elif nindices == 1 : key = row [ 0 ] else : key = None if len ( row ) - nindices == 0 : d [ key ] = None elif len ( row ) - nindices == 1 : d [ key ] = row [ nindices ] else : d [ key ] = tuple ( row [ nindices : ] ) return d
Return a dictionary with the DataFrame data .
152
9
233,918
def toList ( self ) : if self . getNumCols ( ) > 1 : return [ tuple ( self . getRowByIndex ( i ) ) for i in range ( self . getNumRows ( ) ) ] else : return [ self . getRowByIndex ( i ) [ 0 ] for i in range ( self . getNumRows ( ) ) ]
Return a list with the DataFrame data .
79
9
233,919
def toPandas ( self ) : assert pd is not None nindices = self . getNumIndices ( ) headers = self . getHeaders ( ) columns = { header : list ( self . getColumn ( header ) ) for header in headers [ nindices : ] } index = zip ( * [ list ( self . getColumn ( header ) ) for header in headers [ : nindices ] ] ) index = [ key if len ( key ) > 1 else key [ 0 ] for key in index ] if index == [ ] : return pd . DataFrame ( columns , index = None ) else : return pd . DataFrame ( columns , index = index )
Return a pandas DataFrame with the DataFrame data .
144
12
233,920
def set ( self , * args ) : assert len ( args ) in ( 1 , 2 ) if len ( args ) == 1 : value = args [ 0 ] self . _impl . set ( value ) else : index , value = args if isinstance ( value , Real ) : self . _impl . setTplDbl ( Tuple ( index ) . _impl , value ) elif isinstance ( value , basestring ) : self . _impl . setTplStr ( Tuple ( index ) . _impl , value ) else : raise TypeError
Set the value of a single instance of this parameter .
119
11
233,921
def setValues ( self , values ) : if isinstance ( values , ( list , set ) ) : if any ( isinstance ( value , basestring ) for value in values ) : values = list ( map ( str , values ) ) self . _impl . setValuesStr ( values , len ( values ) ) elif all ( isinstance ( value , Real ) for value in values ) : values = list ( map ( float , values ) ) self . _impl . setValuesDbl ( values , len ( values ) ) elif all ( isinstance ( value , tuple ) for value in values ) : self . _impl . setValues ( Utils . toTupleArray ( values ) , len ( values ) ) else : raise TypeError else : if np is not None and isinstance ( values , np . ndarray ) : self . setValues ( DataFrame . fromNumpy ( values ) . toList ( ) ) return Entity . setValues ( self , values )
Set the tuples in this set . Valid only for non - indexed sets .
208
16
233,922
def error ( self , amplexception ) : msg = '\t' + str ( amplexception ) . replace ( '\n' , '\n\t' ) print ( 'Error:\n{:s}' . format ( msg ) ) raise amplexception
Receives notification of an error .
59
8
233,923
def warning ( self , amplexception ) : msg = '\t' + str ( amplexception ) . replace ( '\n' , '\n\t' ) print ( 'Warning:\n{:s}' . format ( msg ) )
Receives notification of a warning .
55
8
233,924
def register_magics ( store_name = '_ampl_cells' , ampl_object = None ) : from IPython . core . magic import ( Magics , magics_class , cell_magic , line_magic ) @ magics_class class StoreAMPL ( Magics ) : def __init__ ( self , shell = None , * * kwargs ) : Magics . __init__ ( self , shell = shell , * * kwargs ) self . _store = [ ] shell . user_ns [ store_name ] = self . _store @ cell_magic def ampl ( self , line , cell ) : """Store the cell in the store""" self . _store . append ( cell ) @ cell_magic def ampl_eval ( self , line , cell ) : """Evaluate the cell""" ampl_object . eval ( cell ) @ line_magic def get_ampl ( self , line ) : """Retrieve the store""" return self . _store get_ipython ( ) . register_magics ( StoreAMPL )
Register jupyter notebook magics %%ampl and %%ampl_eval .
227
18
233,925
def fix ( self , value = None ) : if value is None : self . _impl . fix ( ) else : self . _impl . fix ( value )
Fix all instances of this variable to a value if provided or to their current value otherwise .
34
18
233,926
def toVName ( name , stripNum = 0 , upper = False ) : if upper : name = name . upper ( ) if stripNum != 0 : name = name [ : - stripNum ] return name . replace ( '_' , '-' )
Turn a Python name into an iCalendar style name optionally uppercase and with characters stripped off .
54
21
233,927
def readComponents ( streamOrString , validate = False , transform = True , ignoreUnreadable = False , allowQP = False ) : if isinstance ( streamOrString , basestring ) : stream = six . StringIO ( streamOrString ) else : stream = streamOrString try : stack = Stack ( ) versionLine = None n = 0 for line , n in getLogicalLines ( stream , allowQP ) : if ignoreUnreadable : try : vline = textLineToContentLine ( line , n ) except VObjectError as e : if e . lineNumber is not None : msg = "Skipped line {lineNumber}, message: {msg}" else : msg = "Skipped a line, message: {msg}" logger . error ( msg . format ( * * { 'lineNumber' : e . lineNumber , 'msg' : str ( e ) } ) ) continue else : vline = textLineToContentLine ( line , n ) if vline . name == "VERSION" : versionLine = vline stack . modifyTop ( vline ) elif vline . name == "BEGIN" : stack . push ( Component ( vline . value , group = vline . group ) ) elif vline . name == "PROFILE" : if not stack . top ( ) : stack . push ( Component ( ) ) stack . top ( ) . setProfile ( vline . value ) elif vline . name == "END" : if len ( stack ) == 0 : err = "Attempted to end the {0} component but it was never opened" raise ParseError ( err . format ( vline . value ) , n ) if vline . value . upper ( ) == stack . topName ( ) : # START matches END if len ( stack ) == 1 : component = stack . pop ( ) if versionLine is not None : component . setBehaviorFromVersionLine ( versionLine ) else : behavior = getBehavior ( component . name ) if behavior : component . setBehavior ( behavior ) if validate : component . validate ( raiseException = True ) if transform : component . transformChildrenToNative ( ) yield component # EXIT POINT else : stack . modifyTop ( stack . pop ( ) ) else : err = "{0} component wasn't closed" raise ParseError ( err . format ( stack . topName ( ) ) , n ) else : stack . modifyTop ( vline ) # not a START or END line if stack . top ( ) : if stack . topName ( ) is None : logger . warning ( "Top level component was never named" ) elif stack . top ( ) . useBegin : raise ParseError ( "Component {0!s} was never closed" . format ( ( stack . topName ( ) ) ) , n ) yield stack . pop ( ) except ParseError as e : e . input = streamOrString raise
Generate one Component at a time from a stream .
619
11
233,928
def readOne ( stream , validate = False , transform = True , ignoreUnreadable = False , allowQP = False ) : return next ( readComponents ( stream , validate , transform , ignoreUnreadable , allowQP ) )
Return the first component from stream .
49
7
233,929
def registerBehavior ( behavior , name = None , default = False , id = None ) : if not name : name = behavior . name . upper ( ) if id is None : id = behavior . versionString if name in __behaviorRegistry : if default : __behaviorRegistry [ name ] . insert ( 0 , ( id , behavior ) ) else : __behaviorRegistry [ name ] . append ( ( id , behavior ) ) else : __behaviorRegistry [ name ] = [ ( id , behavior ) ]
Register the given behavior .
108
5
233,930
def getBehavior ( name , id = None ) : name = name . upper ( ) if name in __behaviorRegistry : if id : for n , behavior in __behaviorRegistry [ name ] : if n == id : return behavior return __behaviorRegistry [ name ] [ 0 ] [ 1 ] return None
Return a matching behavior if it exists or None .
66
10
233,931
def validate ( self , * args , * * kwds ) : if self . behavior : return self . behavior . validate ( self , * args , * * kwds ) return True
Call the behavior s validate method or return True .
40
10
233,932
def autoBehavior ( self , cascade = False ) : parentBehavior = self . parentBehavior if parentBehavior is not None : knownChildTup = parentBehavior . knownChildren . get ( self . name , None ) if knownChildTup is not None : behavior = getBehavior ( self . name , knownChildTup [ 2 ] ) if behavior is not None : self . setBehavior ( behavior , cascade ) if isinstance ( self , ContentLine ) and self . encoded : self . behavior . decode ( self ) elif isinstance ( self , ContentLine ) : self . behavior = parentBehavior . defaultBehavior if self . encoded and self . behavior : self . behavior . decode ( self )
Set behavior if name is in self . parentBehavior . knownChildren .
153
15
233,933
def setBehavior ( self , behavior , cascade = True ) : self . behavior = behavior if cascade : for obj in self . getChildren ( ) : obj . parentBehavior = behavior obj . autoBehavior ( True )
Set behavior . If cascade is True autoBehavior all descendants .
47
13
233,934
def serialize ( self , buf = None , lineLength = 75 , validate = True , behavior = None ) : if not behavior : behavior = self . behavior if behavior : if DEBUG : logger . debug ( "serializing {0!s} with behavior {1!s}" . format ( self . name , behavior ) ) return behavior . serialize ( self , buf , lineLength , validate ) else : if DEBUG : logger . debug ( "serializing {0!s} without behavior" . format ( self . name ) ) return defaultSerialize ( self , buf , lineLength )
Serialize to buf if it exists otherwise return a string .
123
12
233,935
def valueRepr ( self ) : v = self . value if self . behavior : v = self . behavior . valueRepr ( self ) return v
Transform the representation of the value according to the behavior if any .
32
13
233,936
def setProfile ( self , name ) : if self . name or self . useBegin : if self . name == name : return raise VObjectError ( "This component already has a PROFILE or " "uses BEGIN." ) self . name = name . upper ( )
Assign a PROFILE to this unnamed component .
57
10
233,937
def add ( self , objOrName , group = None ) : if isinstance ( objOrName , VBase ) : obj = objOrName if self . behavior : obj . parentBehavior = self . behavior obj . autoBehavior ( True ) else : name = objOrName . upper ( ) try : id = self . behavior . knownChildren [ name ] [ 2 ] behavior = getBehavior ( name , id ) if behavior . isComponent : obj = Component ( name ) else : obj = ContentLine ( name , [ ] , '' , group ) obj . parentBehavior = self . behavior obj . behavior = behavior obj = obj . transformToNative ( ) except ( KeyError , AttributeError ) : obj = ContentLine ( objOrName , [ ] , '' , group ) if obj . behavior is None and self . behavior is not None : if isinstance ( obj , ContentLine ) : obj . behavior = self . behavior . defaultBehavior self . contents . setdefault ( obj . name . lower ( ) , [ ] ) . append ( obj ) return obj
Add objOrName to contents set behavior if it can be inferred .
227
14
233,938
def remove ( self , obj ) : named = self . contents . get ( obj . name . lower ( ) ) if named : try : named . remove ( obj ) if len ( named ) == 0 : del self . contents [ obj . name . lower ( ) ] except ValueError : pass
Remove obj from contents .
61
5
233,939
def setBehaviorFromVersionLine ( self , versionLine ) : v = getBehavior ( self . name , versionLine . value ) if v : self . setBehavior ( v )
Set behavior if one matches name versionLine . value .
40
11
233,940
def transformChildrenToNative ( self ) : for childArray in ( self . contents [ k ] for k in self . sortChildKeys ( ) ) : for child in childArray : child = child . transformToNative ( ) child . transformChildrenToNative ( )
Recursively replace children with their native representation .
55
10
233,941
def transformChildrenFromNative ( self , clearBehavior = True ) : for childArray in self . contents . values ( ) : for child in childArray : child = child . transformFromNative ( ) child . transformChildrenFromNative ( clearBehavior ) if clearBehavior : child . behavior = None child . parentBehavior = None
Recursively transform native children to vanilla representations .
70
10
233,942
def change_tz ( cal , new_timezone , default , utc_only = False , utc_tz = icalendar . utc ) : for vevent in getattr ( cal , 'vevent_list' , [ ] ) : start = getattr ( vevent , 'dtstart' , None ) end = getattr ( vevent , 'dtend' , None ) for node in ( start , end ) : if node : dt = node . value if ( isinstance ( dt , datetime ) and ( not utc_only or dt . tzinfo == utc_tz ) ) : if dt . tzinfo is None : dt = dt . replace ( tzinfo = default ) node . value = dt . astimezone ( new_timezone )
Change the timezone of the specified component .
175
9
233,943
def defaultSerialize ( obj , buf , lineLength ) : outbuf = buf or six . StringIO ( ) if isinstance ( obj , Component ) : if obj . group is None : groupString = '' else : groupString = obj . group + '.' if obj . useBegin : foldOneLine ( outbuf , "{0}BEGIN:{1}" . format ( groupString , obj . name ) , lineLength ) for child in obj . getSortedChildren ( ) : # validate is recursive, we only need to validate once child . serialize ( outbuf , lineLength , validate = False ) if obj . useBegin : foldOneLine ( outbuf , "{0}END:{1}" . format ( groupString , obj . name ) , lineLength ) elif isinstance ( obj , ContentLine ) : startedEncoded = obj . encoded if obj . behavior and not startedEncoded : obj . behavior . encode ( obj ) s = six . StringIO ( ) if obj . group is not None : s . write ( obj . group + '.' ) s . write ( obj . name . upper ( ) ) keys = sorted ( obj . params . keys ( ) ) for key in keys : paramstr = ',' . join ( dquoteEscape ( p ) for p in obj . params [ key ] ) s . write ( ";{0}={1}" . format ( key , paramstr ) ) s . write ( ":{0}" . format ( str_ ( obj . value ) ) ) if obj . behavior and not startedEncoded : obj . behavior . decode ( obj ) foldOneLine ( outbuf , s . getvalue ( ) , lineLength ) return buf or outbuf . getvalue ( )
Encode and fold obj and its children write to buf or return a string .
365
16
233,944
def toUnicode ( s ) : if isinstance ( s , six . binary_type ) : s = s . decode ( 'utf-8' ) return s
Take a string or unicode turn it into unicode decoding as utf - 8
36
17
233,945
def numToDigits ( num , places ) : s = str ( num ) if len ( s ) < places : return ( "0" * ( places - len ( s ) ) ) + s elif len ( s ) > places : return s [ len ( s ) - places : ] else : return s
Helper for converting numbers to textual digits .
66
8
233,946
def timedeltaToString ( delta ) : if delta . days == 0 : sign = 1 else : sign = delta . days / abs ( delta . days ) delta = abs ( delta ) days = delta . days hours = int ( delta . seconds / 3600 ) minutes = int ( ( delta . seconds % 3600 ) / 60 ) seconds = int ( delta . seconds % 60 ) output = '' if sign == - 1 : output += '-' output += 'P' if days : output += '{}D' . format ( days ) if hours or minutes or seconds : output += 'T' elif not days : # Deal with zero duration output += 'T0S' if hours : output += '{}H' . format ( hours ) if minutes : output += '{}M' . format ( minutes ) if seconds : output += '{}S' . format ( seconds ) return output
Convert timedelta to an ical DURATION .
189
12
233,947
def stringToTextValues ( s , listSeparator = ',' , charList = None , strict = False ) : if charList is None : charList = escapableCharList def escapableChar ( c ) : return c in charList def error ( msg ) : if strict : raise ParseError ( msg ) else : logging . error ( msg ) # vars which control state machine charIterator = enumerate ( s ) state = "read normal" current = [ ] results = [ ] while True : try : charIndex , char = next ( charIterator ) except : char = "eof" if state == "read normal" : if char == '\\' : state = "read escaped char" elif char == listSeparator : state = "read normal" current = "" . join ( current ) results . append ( current ) current = [ ] elif char == "eof" : state = "end" else : state = "read normal" current . append ( char ) elif state == "read escaped char" : if escapableChar ( char ) : state = "read normal" if char in 'nN' : current . append ( '\n' ) else : current . append ( char ) else : state = "read normal" # leave unrecognized escaped characters for later passes current . append ( '\\' + char ) elif state == "end" : # an end state if len ( current ) or len ( results ) == 0 : current = "" . join ( current ) results . append ( current ) return results elif state == "error" : # an end state return results else : state = "error" error ( "unknown state: '{0!s}' reached in {1!s}" . format ( state , s ) )
Returns list of strings .
380
5
233,948
def parseDtstart ( contentline , allowSignatureMismatch = False ) : tzinfo = getTzid ( getattr ( contentline , 'tzid_param' , None ) ) valueParam = getattr ( contentline , 'value_param' , 'DATE-TIME' ) . upper ( ) if valueParam == "DATE" : return stringToDate ( contentline . value ) elif valueParam == "DATE-TIME" : try : return stringToDateTime ( contentline . value , tzinfo ) except : if allowSignatureMismatch : return stringToDate ( contentline . value ) else : raise
Convert a contentline s value into a date or date - time .
141
15
233,949
def tzinfo_eq ( tzinfo1 , tzinfo2 , startYear = 2000 , endYear = 2020 ) : if tzinfo1 == tzinfo2 : return True elif tzinfo1 is None or tzinfo2 is None : return False def dt_test ( dt ) : if dt is None : return True return tzinfo1 . utcoffset ( dt ) == tzinfo2 . utcoffset ( dt ) if not dt_test ( datetime . datetime ( startYear , 1 , 1 ) ) : return False for year in range ( startYear , endYear ) : for transitionTo in 'daylight' , 'standard' : t1 = getTransition ( transitionTo , year , tzinfo1 ) t2 = getTransition ( transitionTo , year , tzinfo2 ) if t1 != t2 or not dt_test ( t1 ) : return False return True
Compare offsets and DST transitions from startYear to endYear .
209
13
233,950
def registerTzinfo ( obj , tzinfo ) : tzid = obj . pickTzid ( tzinfo ) if tzid and not getTzid ( tzid , False ) : registerTzid ( tzid , tzinfo ) return tzid
Register tzinfo if it s not already registered return its tzid .
63
16
233,951
def pickTzid ( tzinfo , allowUTC = False ) : if tzinfo is None or ( not allowUTC and tzinfo_eq ( tzinfo , utc ) ) : # If tzinfo is UTC, we don't need a TZID return None # try PyICU's tzid key if hasattr ( tzinfo , 'tzid' ) : return toUnicode ( tzinfo . tzid ) # try pytz zone key if hasattr ( tzinfo , 'zone' ) : return toUnicode ( tzinfo . zone ) # try tzical's tzid key elif hasattr ( tzinfo , '_tzid' ) : return toUnicode ( tzinfo . _tzid ) else : # return tzname for standard (non-DST) time notDST = datetime . timedelta ( 0 ) for month in range ( 1 , 13 ) : dt = datetime . datetime ( 2000 , month , 1 ) if tzinfo . dst ( dt ) == notDST : return toUnicode ( tzinfo . tzname ( dt ) ) # there was no standard time in 2000! raise VObjectError ( "Unable to guess TZID for tzinfo {0!s}" . format ( tzinfo ) )
Given a tzinfo class use known APIs to determine TZID or use tzname .
295
20
233,952
def transformToNative ( obj ) : if not obj . isNative : object . __setattr__ ( obj , '__class__' , RecurringComponent ) obj . isNative = True return obj
Turn a recurring Component into a RecurringComponent .
42
10
233,953
def generateImplicitParameters ( obj ) : if not hasattr ( obj , 'uid' ) : rand = int ( random . random ( ) * 100000 ) now = datetime . datetime . now ( utc ) now = dateTimeToString ( now ) host = socket . gethostname ( ) obj . add ( ContentLine ( 'UID' , [ ] , "{0} - {1}@{2}" . format ( now , rand , host ) ) )
Generate a UID if one does not exist .
101
10
233,954
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True if obj . value == '' : return obj obj . value = obj . value obj . value = parseDtstart ( obj , allowSignatureMismatch = True ) if getattr ( obj , 'value_param' , 'DATE-TIME' ) . upper ( ) == 'DATE-TIME' : if hasattr ( obj , 'tzid_param' ) : # Keep a copy of the original TZID around obj . params [ 'X-VOBJ-ORIGINAL-TZID' ] = [ obj . tzid_param ] del obj . tzid_param return obj
Turn obj . value into a date or datetime .
154
11
233,955
def transformFromNative ( obj ) : if type ( obj . value ) == datetime . date : obj . isNative = False obj . value_param = 'DATE' obj . value = dateToString ( obj . value ) return obj else : return DateTimeBehavior . transformFromNative ( obj )
Replace the date or datetime in obj . value with an ISO 8601 string .
65
18
233,956
def transformFromNative ( obj ) : if obj . value and type ( obj . value [ 0 ] ) == datetime . date : obj . isNative = False obj . value_param = 'DATE' obj . value = ',' . join ( [ dateToString ( val ) for val in obj . value ] ) return obj # Fixme: handle PERIOD case else : if obj . isNative : obj . isNative = False transformed = [ ] tzid = None for val in obj . value : if tzid is None and type ( val ) == datetime . datetime : tzid = TimezoneComponent . registerTzinfo ( val . tzinfo ) if tzid is not None : obj . tzid_param = tzid transformed . append ( dateTimeToString ( val ) ) obj . value = ',' . join ( transformed ) return obj
Replace the date datetime or period tuples in obj . value with appropriate strings .
190
18
233,957
def decode ( cls , line ) : if line . encoded : line . value = stringToTextValues ( line . value , listSeparator = cls . listSeparator ) line . encoded = False
Remove backslash escaping from line . value then split on commas .
45
15
233,958
def generateImplicitParameters ( obj ) : try : obj . action except AttributeError : obj . add ( 'action' ) . value = 'AUDIO' try : obj . trigger except AttributeError : obj . add ( 'trigger' ) . value = datetime . timedelta ( 0 )
Create default ACTION and TRIGGER if they re not set .
63
13
233,959
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = obj . value if obj . value == '' : return obj else : deltalist = stringToDurations ( obj . value ) # When can DURATION have multiple durations? For now: if len ( deltalist ) == 1 : obj . value = deltalist [ 0 ] return obj else : raise ParseError ( "DURATION must have a single duration string." )
Turn obj . value into a datetime . timedelta .
108
12
233,960
def transformFromNative ( obj ) : if not obj . isNative : return obj obj . isNative = False obj . value = timedeltaToString ( obj . value ) return obj
Replace the datetime . timedelta in obj . value with an RFC2445 string .
38
19
233,961
def transformToNative ( obj ) : if obj . isNative : return obj value = getattr ( obj , 'value_param' , 'DURATION' ) . upper ( ) if hasattr ( obj , 'value_param' ) : del obj . value_param if obj . value == '' : obj . isNative = True return obj elif value == 'DURATION' : try : return Duration . transformToNative ( obj ) except ParseError : logger . warning ( "TRIGGER not recognized as DURATION, trying " "DATE-TIME, because iCal sometimes exports " "DATE-TIMEs without setting VALUE=DATE-TIME" ) try : obj . isNative = False dt = DateTimeBehavior . transformToNative ( obj ) return dt except : msg = "TRIGGER with no VALUE not recognized as DURATION " "or as DATE-TIME" raise ParseError ( msg ) elif value == 'DATE-TIME' : # TRIGGERs with DATE-TIME values must be in UTC, we could validate # that fact, for now we take it on faith. return DateTimeBehavior . transformToNative ( obj ) else : raise ParseError ( "VALUE must be DURATION or DATE-TIME" )
Turn obj . value into a timedelta or datetime .
279
12
233,962
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True if obj . value == '' : obj . value = [ ] return obj tzinfo = getTzid ( getattr ( obj , 'tzid_param' , None ) ) obj . value = [ stringToPeriod ( x , tzinfo ) for x in obj . value . split ( "," ) ] return obj
Convert comma separated periods into tuples .
92
9
233,963
def transformFromNative ( cls , obj ) : if obj . isNative : obj . isNative = False transformed = [ ] for tup in obj . value : transformed . append ( periodToString ( tup , cls . forceUTC ) ) if len ( transformed ) > 0 : tzid = TimezoneComponent . registerTzinfo ( tup [ 0 ] . tzinfo ) if not cls . forceUTC and tzid is not None : obj . tzid_param = tzid obj . value = ',' . join ( transformed ) return obj
Convert the list of tuples in obj . value to strings .
123
14
233,964
def serializeFields ( obj , order = None ) : fields = [ ] if order is None : fields = [ backslashEscape ( val ) for val in obj ] else : for field in order : escapedValueList = [ backslashEscape ( val ) for val in toList ( getattr ( obj , field ) ) ] fields . append ( ',' . join ( escapedValueList ) ) return ';' . join ( fields )
Turn an object s fields into a ; and seperated string .
96
14
233,965
def toString ( val , join_char = '\n' ) : if type ( val ) in ( list , tuple ) : return join_char . join ( val ) return val
Turn a string or array value into a string .
39
10
233,966
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = Name ( * * dict ( zip ( NAME_ORDER , splitFields ( obj . value ) ) ) ) return obj
Turn obj . value into a Name .
52
8
233,967
def transformFromNative ( obj ) : obj . isNative = False obj . value = serializeFields ( obj . value , NAME_ORDER ) return obj
Replace the Name in obj . value with a string .
34
12
233,968
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = Address ( * * dict ( zip ( ADDRESS_ORDER , splitFields ( obj . value ) ) ) ) return obj
Turn obj . value into an Address .
54
8
233,969
def transformToNative ( obj ) : if obj . isNative : return obj obj . isNative = True obj . value = splitFields ( obj . value ) return obj
Turn obj . value into a list .
36
8
233,970
def decode ( cls , line ) : if line . encoded : if 'BASE64' in line . singletonparams : line . singletonparams . remove ( 'BASE64' ) line . encoding_param = cls . base64string encoding = getattr ( line , 'encoding_param' , None ) if encoding : line . value = codecs . decode ( line . value . encode ( "utf-8" ) , "base64" ) else : line . value = stringToTextValues ( line . value ) [ 0 ] line . encoded = False
Remove backslash escaping from line . valueDecode line either to remove backslash espacing or to decode base64 encoding . The content line should contain a ENCODING = b for base64 encoding but Apple Addressbook seems to export a singleton parameter of BASE64 which does not match the 3 . 0 vCard spec . If we encouter that then we transform the parameter to ENCODING = b
122
86
233,971
def validate ( cls , obj , raiseException = False , complainUnrecognized = False ) : if not cls . allowGroup and obj . group is not None : err = "{0} has a group, but this object doesn't support groups" . format ( obj ) raise base . VObjectError ( err ) if isinstance ( obj , base . ContentLine ) : return cls . lineValidate ( obj , raiseException , complainUnrecognized ) elif isinstance ( obj , base . Component ) : count = { } for child in obj . getChildren ( ) : if not child . validate ( raiseException , complainUnrecognized ) : return False name = child . name . upper ( ) count [ name ] = count . get ( name , 0 ) + 1 for key , val in cls . knownChildren . items ( ) : if count . get ( key , 0 ) < val [ 0 ] : if raiseException : m = "{0} components must contain at least {1} {2}" raise base . ValidateError ( m . format ( cls . name , val [ 0 ] , key ) ) return False if val [ 1 ] and count . get ( key , 0 ) > val [ 1 ] : if raiseException : m = "{0} components cannot contain more than {1} {2}" raise base . ValidateError ( m . format ( cls . name , val [ 1 ] , key ) ) return False return True else : err = "{0} is not a Component or Contentline" . format ( obj ) raise base . VObjectError ( err )
Check if the object satisfies this behavior s requirements .
337
10
233,972
def pickNthWeekday ( year , month , dayofweek , hour , minute , whichweek ) : first = datetime . datetime ( year = year , month = month , hour = hour , minute = minute , day = 1 ) weekdayone = first . replace ( day = ( ( dayofweek - first . isoweekday ( ) ) % 7 + 1 ) ) for n in xrange ( whichweek - 1 , - 1 , - 1 ) : dt = weekdayone + n * WEEKS if dt . month == month : return dt
dayofweek == 0 means Sunday whichweek > 4 means last instance
121
14
233,973
def deleteExtraneous ( component , ignore_dtstamp = False ) : for comp in component . components ( ) : deleteExtraneous ( comp , ignore_dtstamp ) for line in component . lines ( ) : if 'X-VOBJ-ORIGINAL-TZID' in line . params : del line . params [ 'X-VOBJ-ORIGINAL-TZID' ] if ignore_dtstamp and hasattr ( component , 'dtstamp_list' ) : del component . dtstamp_list
Recursively walk the component s children deleting extraneous details like X - VOBJ - ORIGINAL - TZID .
120
26
233,974
def fillPelicanHole ( site , username , password , tstat_name , start_time , end_time ) : start = datetime . strptime ( start_time , _INPUT_TIME_FORMAT ) . replace ( tzinfo = pytz . utc ) . astimezone ( _pelican_time ) end = datetime . strptime ( end_time , _INPUT_TIME_FORMAT ) . replace ( tzinfo = pytz . utc ) . astimezone ( _pelican_time ) heat_needs_fan = _lookupHeatNeedsFan ( site , username , password , tstat_name ) if heat_needs_fan is None : return None # Pelican's API only allows a query covering a time range of up to 1 month # So we may need run multiple requests for historical data history_blocks = [ ] while start < end : block_start = start block_end = min ( start + timedelta ( days = 30 ) , end ) blocks = _lookupHistoricalData ( site , username , password , tstat_name , block_start , block_end ) if blocks is None : return None history_blocks . extend ( blocks ) start += timedelta ( days = 30 , minutes = 1 ) output_rows = [ ] for block in history_blocks : runStatus = block . find ( "runStatus" ) . text if runStatus . startswith ( "Heat" ) : fanState = ( heatNeedsFan == "Yes" ) else : fanState = ( runStatus != "Off" ) api_time = datetime . strptime ( block . find ( "timestamp" ) . text , "%Y-%m-%dT%H:%M" ) . replace ( tzinfo = _pelican_time ) # Need to convert seconds to nanoseconds timestamp = int ( api_time . timestamp ( ) * 10 ** 9 ) output_rows . append ( { "temperature" : float ( block . find ( "temperature" ) . text ) , "relative_humidity" : float ( block . find ( "humidity" ) . text ) , "heating_setpoint" : float ( block . find ( "heatSetting" ) . text ) , "cooling_setpoint" : float ( block . find ( "coolSetting" ) . text ) , # Driver explicitly uses "Schedule" field, but we don't have this in history "override" : block . find ( "setBy" ) . text != "Schedule" , "fan" : fanState , "mode" : _mode_name_mappings [ block . find ( "system" ) . text ] , "state" : _state_mappings . get ( runStatus , 0 ) , "time" : timestamp , } ) df = pd . DataFrame ( output_rows ) df . drop_duplicates ( subset = "time" , keep = "first" , inplace = True ) return df
Fill a hole in a Pelican thermostat s data stream .
650
14
233,975
def add_degree_days ( self , col = 'OAT' , hdh_cpoint = 65 , cdh_cpoint = 65 ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data # Calculate hdh data [ 'hdh' ] = data [ col ] over_hdh = data . loc [ : , col ] > hdh_cpoint data . loc [ over_hdh , 'hdh' ] = 0 data . loc [ ~ over_hdh , 'hdh' ] = hdh_cpoint - data . loc [ ~ over_hdh , col ] # Calculate cdh data [ 'cdh' ] = data [ col ] under_cdh = data . loc [ : , col ] < cdh_cpoint data . loc [ under_cdh , 'cdh' ] = 0 data . loc [ ~ under_cdh , 'cdh' ] = data . loc [ ~ under_cdh , col ] - cdh_cpoint self . preprocessed_data = data
Adds Heating & Cooling Degree Hours .
243
9
233,976
def add_col_features ( self , col = None , degree = None ) : if not col and not degree : return else : if isinstance ( col , list ) and isinstance ( degree , list ) : if len ( col ) != len ( degree ) : print ( 'col len: ' , len ( col ) ) print ( 'degree len: ' , len ( degree ) ) raise ValueError ( 'col and degree should have equal length.' ) else : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data for i in range ( len ( col ) ) : data . loc [ : , col [ i ] + str ( degree [ i ] ) ] = pow ( data . loc [ : , col [ i ] ] , degree [ i ] ) / pow ( 10 , degree [ i ] - 1 ) self . preprocessed_data = data else : raise TypeError ( 'col and degree should be lists.' )
Exponentiate columns of dataframe .
213
8
233,977
def standardize ( self ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data scaler = preprocessing . StandardScaler ( ) data = pd . DataFrame ( scaler . fit_transform ( data ) , columns = data . columns , index = data . index ) self . preprocessed_data = data
Standardize data .
86
4
233,978
def normalize ( self ) : if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data data = pd . DataFrame ( preprocessing . normalize ( data ) , columns = data . columns , index = data . index ) self . preprocessed_data = data
Normalize data .
74
4
233,979
def add_time_features ( self , year = False , month = False , week = True , tod = True , dow = True ) : var_to_expand = [ ] if self . preprocessed_data . empty : data = self . original_data else : data = self . preprocessed_data if year : data [ "year" ] = data . index . year var_to_expand . append ( "year" ) if month : data [ "month" ] = data . index . month var_to_expand . append ( "month" ) if week : data [ "week" ] = data . index . week var_to_expand . append ( "week" ) if tod : data [ "tod" ] = data . index . hour var_to_expand . append ( "tod" ) if dow : data [ "dow" ] = data . index . weekday var_to_expand . append ( "dow" ) # One-hot encode the time features for var in var_to_expand : add_var = pd . get_dummies ( data [ var ] , prefix = var , drop_first = True ) # Add all the columns to the model data data = data . join ( add_var ) # Drop the original column that was expanded data . drop ( columns = [ var ] , inplace = True ) self . preprocessed_data = data
Add time features to dataframe .
310
7
233,980
def split_data ( self ) : try : # Extract data ranging in time_period1 time_period1 = ( slice ( self . baseline_period [ 0 ] , self . baseline_period [ 1 ] ) ) self . baseline_in = self . original_data . loc [ time_period1 , self . input_col ] self . baseline_out = self . original_data . loc [ time_period1 , self . output_col ] if self . exclude_time_period : for i in range ( 0 , len ( self . exclude_time_period ) , 2 ) : # Drop data ranging in exclude_time_period1 exclude_time_period1 = ( slice ( self . exclude_time_period [ i ] , self . exclude_time_period [ i + 1 ] ) ) self . baseline_in . drop ( self . baseline_in . loc [ exclude_time_period1 ] . index , axis = 0 , inplace = True ) self . baseline_out . drop ( self . baseline_out . loc [ exclude_time_period1 ] . index , axis = 0 , inplace = True ) except Exception as e : raise e # CHECK: Can optimize this part # Error checking to ensure time_period values are valid if self . projection_period : for i in range ( 0 , len ( self . projection_period ) , 2 ) : period = ( slice ( self . projection_period [ i ] , self . projection_period [ i + 1 ] ) ) try : self . original_data . loc [ period , self . input_col ] self . original_data . loc [ period , self . output_col ] except Exception as e : raise e
Split data according to baseline and projection time period values .
361
11
233,981
def linear_regression ( self ) : model = LinearRegression ( ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = sum ( scores ) / len ( scores ) self . models . append ( model ) self . model_names . append ( 'Linear Regression' ) self . max_scores . append ( mean_score ) self . metrics [ 'Linear Regression' ] = { } self . metrics [ 'Linear Regression' ] [ 'R2' ] = mean_score self . metrics [ 'Linear Regression' ] [ 'Adj R2' ] = self . adj_r2 ( mean_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] )
Linear Regression .
276
5
233,982
def lasso_regression ( self ) : score_list = [ ] max_score = float ( '-inf' ) best_alpha = None for alpha in self . alphas : # model = Lasso(normalize=True, alpha=alpha, max_iter=5000) model = Lasso ( alpha = alpha , max_iter = 5000 ) model . fit ( self . baseline_in , self . baseline_out . values . ravel ( ) ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = np . mean ( scores ) score_list . append ( mean_score ) if mean_score > max_score : max_score = mean_score best_alpha = alpha # self.models.append(Lasso(normalize=True, alpha=best_alpha, max_iter=5000)) self . models . append ( Lasso ( alpha = best_alpha , max_iter = 5000 ) ) self . model_names . append ( 'Lasso Regression' ) self . max_scores . append ( max_score ) self . metrics [ 'Lasso Regression' ] = { } self . metrics [ 'Lasso Regression' ] [ 'R2' ] = max_score self . metrics [ 'Lasso Regression' ] [ 'Adj R2' ] = self . adj_r2 ( max_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] )
Lasso Regression .
428
5
233,983
def random_forest ( self ) : model = RandomForestRegressor ( random_state = 42 ) scores = [ ] kfold = KFold ( n_splits = self . cv , shuffle = True , random_state = 42 ) for i , ( train , test ) in enumerate ( kfold . split ( self . baseline_in , self . baseline_out ) ) : model . fit ( self . baseline_in . iloc [ train ] , self . baseline_out . iloc [ train ] ) scores . append ( model . score ( self . baseline_in . iloc [ test ] , self . baseline_out . iloc [ test ] ) ) mean_score = np . mean ( scores ) self . models . append ( model ) self . model_names . append ( 'Random Forest Regressor' ) self . max_scores . append ( mean_score ) self . metrics [ 'Random Forest Regressor' ] = { } self . metrics [ 'Random Forest Regressor' ] [ 'R2' ] = mean_score self . metrics [ 'Random Forest Regressor' ] [ 'Adj R2' ] = self . adj_r2 ( mean_score , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] )
Random Forest .
278
3
233,984
def run_models ( self ) : self . linear_regression ( ) self . lasso_regression ( ) self . ridge_regression ( ) self . elastic_net_regression ( ) self . random_forest ( ) self . ann ( ) # Index of the model with max score best_model_index = self . max_scores . index ( max ( self . max_scores ) ) # Store name of the optimal model self . best_model_name = self . model_names [ best_model_index ] # Store optimal model self . best_model = self . models [ best_model_index ] return self . metrics
Run all models .
139
4
233,985
def custom_model ( self , func ) : y_pred = func ( self . baseline_in , self . baseline_out ) self . custom_metrics = { } self . custom_metrics [ 'r2' ] = r2_score ( self . baseline_out , y_pred ) self . custom_metrics [ 'mse' ] = mean_squared_error ( self . baseline_out , y_pred ) self . custom_metrics [ 'rmse' ] = math . sqrt ( self . custom_metrics [ 'mse' ] ) self . custom_metrics [ 'adj_r2' ] = self . adj_r2 ( self . custom_metrics [ 'r2' ] , self . baseline_in . shape [ 0 ] , self . baseline_in . shape [ 1 ] ) return self . custom_metrics
Run custom model provided by user .
190
7
233,986
def best_model_fit ( self ) : self . best_model . fit ( self . baseline_in , self . baseline_out ) self . y_true = self . baseline_out # Pandas Series self . y_pred = self . best_model . predict ( self . baseline_in ) # numpy.ndarray # Set all negative values to zero since energy > 0 self . y_pred [ self . y_pred < 0 ] = 0 # n and k values for adj r2 score self . n_test = self . baseline_in . shape [ 0 ] # Number of points in data sample self . k_test = self . baseline_in . shape [ 1 ] # Number of variables in model, excluding the constant # Store best model's metrics self . best_metrics [ 'name' ] = self . best_model_name self . best_metrics [ 'r2' ] = r2_score ( self . y_true , self . y_pred ) self . best_metrics [ 'mse' ] = mean_squared_error ( self . y_true , self . y_pred ) self . best_metrics [ 'rmse' ] = math . sqrt ( self . best_metrics [ 'mse' ] ) self . best_metrics [ 'adj_r2' ] = self . adj_r2 ( self . best_metrics [ 'r2' ] , self . n_test , self . k_test ) # Normalized Mean Bias Error numerator = sum ( self . y_true - self . y_pred ) denominator = ( self . n_test - self . k_test ) * ( sum ( self . y_true ) / len ( self . y_true ) ) self . best_metrics [ 'nmbe' ] = numerator / denominator # MAPE can't have 0 values in baseline_out -> divide by zero error self . baseline_out_copy = self . baseline_out [ self . baseline_out != 0 ] self . baseline_in_copy = self . baseline_in [ self . baseline_in . index . isin ( self . baseline_out_copy . index ) ] self . y_true_copy = self . baseline_out_copy # Pandas Series self . y_pred_copy = self . best_model . predict ( self . baseline_in_copy ) # numpy.ndarray self . best_metrics [ 'mape' ] = np . mean ( np . abs ( ( self . y_true_copy - self . y_pred_copy ) / self . y_true_copy ) ) * 100 return self . best_metrics
Fit data to optimal model and return its metrics .
581
10
233,987
def correlation_plot ( self , data ) : # CHECK: Add saved filename in result.json fig = plt . figure ( Plot_Data . count ) corr = data . corr ( ) ax = sns . heatmap ( corr ) Plot_Data . count += 1 return fig
Create heatmap of Pearson s correlation coefficient .
63
9
233,988
def baseline_projection_plot ( self , y_true , y_pred , baseline_period , projection_period , model_name , adj_r2 , data , input_col , output_col , model , site ) : # Baseline and projection plots fig = plt . figure ( Plot_Data . count ) # Number of plots to display if projection_period : nrows = len ( baseline_period ) + len ( projection_period ) / 2 else : nrows = len ( baseline_period ) / 2 # Plot 1 - Baseline base_df = pd . DataFrame ( ) base_df [ 'y_true' ] = y_true base_df [ 'y_pred' ] = y_pred ax1 = fig . add_subplot ( nrows , 1 , 1 ) base_df . plot ( ax = ax1 , figsize = self . figsize , title = 'Baseline Period ({}-{}). \nBest Model: {}. \nBaseline Adj R2: {}. \nSite: {}.' . format ( baseline_period [ 0 ] , baseline_period [ 1 ] , model_name , adj_r2 , site ) ) if projection_period : # Display projection plots num_plot = 2 for i in range ( 0 , len ( projection_period ) , 2 ) : ax = fig . add_subplot ( nrows , 1 , num_plot ) period = ( slice ( projection_period [ i ] , projection_period [ i + 1 ] ) ) project_df = pd . DataFrame ( ) try : project_df [ 'y_true' ] = data . loc [ period , output_col ] project_df [ 'y_pred' ] = model . predict ( data . loc [ period , input_col ] ) # Set all negative values to zero since energy > 0 project_df [ 'y_pred' ] [ project_df [ 'y_pred' ] < 0 ] = 0 project_df . plot ( ax = ax , figsize = self . figsize , title = 'Projection Period ({}-{})' . format ( projection_period [ i ] , projection_period [ i + 1 ] ) ) num_plot += 1 fig . tight_layout ( ) Plot_Data . count += 1 return fig , project_df [ 'y_true' ] , project_df [ 'y_pred' ] except : raise TypeError ( "If projecting into the future, please specify project_ind_col that has data available \ in the future time period requested." ) return fig , None , None
Create baseline and projection plots .
557
6
233,989
def get_thermostat_meter_data ( zone ) : meter_uri = zone2meter . get ( zone , "None" ) data = [ ] def cb ( msg ) : for po in msg . payload_objects : if po . type_dotted == ( 2 , 0 , 9 , 1 ) : m = msgpack . unpackb ( po . content ) data . append ( m [ 'current_demand' ] ) handle = c . subscribe ( meter_uri + "/signal/meter" , cb ) def stop ( ) : c . unsubscribe ( handle ) return data return stop
This method subscribes to the output of the meter for the given zone . It returns a handler to call when you want to stop subscribing data which returns a list of the data readins over that time period
130
41
233,990
def call_heat ( tstat ) : current_hsp , current_csp = tstat . heating_setpoint , tstat . cooling_setpoint current_temp = tstat . temperature tstat . write ( { 'heating_setpoint' : current_temp + 10 , 'cooling_setpoint' : current_temp + 20 , 'mode' : HEAT , } ) def restore ( ) : tstat . write ( { 'heating_setpoint' : current_hsp , 'cooling_setpoint' : current_csp , 'mode' : AUTO , } ) return restore
Adjusts the temperature setpoints in order to call for heating . Returns a handler to call when you want to reset the thermostat
133
27
233,991
def call_cool ( tstat ) : current_hsp , current_csp = tstat . heating_setpoint , tstat . cooling_setpoint current_temp = tstat . temperature tstat . write ( { 'heating_setpoint' : current_temp - 20 , 'cooling_setpoint' : current_temp - 10 , 'mode' : COOL , } ) def restore ( ) : tstat . write ( { 'heating_setpoint' : current_hsp , 'cooling_setpoint' : current_csp , 'mode' : AUTO , } ) return restore
Adjusts the temperature setpoints in order to call for cooling . Returns a handler to call when you want to reset the thermostat
133
27
233,992
def call_fan ( tstat ) : old_fan = tstat . fan tstat . write ( { 'fan' : not old_fan , } ) def restore ( ) : tstat . write ( { 'fan' : old_fan , } ) return restore
Toggles the fan
57
4
233,993
def _load_csv ( self , file_name , folder_name , head_row , index_col , convert_col , concat_files ) : # Denotes all csv files if file_name == "*" : if not os . path . isdir ( folder_name ) : raise OSError ( 'Folder does not exist.' ) else : file_name_list = sorted ( glob . glob ( folder_name + '*.csv' ) ) if not file_name_list : raise OSError ( 'Either the folder does not contain any csv files or invalid folder provided.' ) else : # Call previous function again with parameters changed (file_name=file_name_list, folder_name=None) # Done to reduce redundancy of code self . import_csv ( file_name = file_name_list , head_row = head_row , index_col = index_col , convert_col = convert_col , concat_files = concat_files ) return self . data else : if not os . path . isdir ( folder_name ) : raise OSError ( 'Folder does not exist.' ) else : path = os . path . join ( folder_name , file_name ) if head_row > 0 : data = pd . read_csv ( path , index_col = index_col , skiprows = [ i for i in range ( head_row - 1 ) ] ) else : data = pd . read_csv ( path , index_col = index_col ) # Convert time into datetime format try : # Special case format 1/4/14 21:30 data . index = pd . to_datetime ( data . index , format = '%m/%d/%y %H:%M' ) except : data . index = pd . to_datetime ( data . index , dayfirst = False , infer_datetime_format = True ) # Convert all columns to numeric type if convert_col : # Check columns in dataframe to see if they are numeric for col in data . columns : # If particular column is not numeric, then convert to numeric type if data [ col ] . dtype != np . number : data [ col ] = pd . to_numeric ( data [ col ] , errors = "coerce" ) return data
Load single csv file .
504
6
233,994
def convert_to_utc ( time ) : # time is already in UTC if 'Z' in time : return time else : time_formatted = time [ : - 3 ] + time [ - 2 : ] dt = datetime . strptime ( time_formatted , '%Y-%m-%dT%H:%M:%S%z' ) dt = dt . astimezone ( timezone ( 'UTC' ) ) return dt . strftime ( '%Y-%m-%dT%H:%M:%SZ' )
Convert time to UTC
129
5
233,995
def get_meter ( self , site , start , end , point_type = 'Green_Button_Meter' , var = "meter" , agg = 'MEAN' , window = '24h' , aligned = True , return_names = True ) : # Convert time to UTC start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) request = self . compose_MDAL_dic ( point_type = point_type , site = site , start = start , end = end , var = var , agg = agg , window = window , aligned = aligned ) resp = self . m . query ( request ) if return_names : resp = self . replace_uuid_w_names ( resp ) return resp
Get meter data from MDAL .
168
7
233,996
def get_tstat ( self , site , start , end , var = "tstat_temp" , agg = 'MEAN' , window = '24h' , aligned = True , return_names = True ) : # Convert time to UTC start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) point_map = { "tstat_state" : "Thermostat_Status" , "tstat_hsp" : "Supply_Air_Temperature_Heating_Setpoint" , "tstat_csp" : "Supply_Air_Temperature_Cooling_Setpoint" , "tstat_temp" : "Temperature_Sensor" } if isinstance ( var , list ) : point_type = [ point_map [ point_type ] for point_type in var ] # list of all the point names using BRICK classes else : point_type = point_map [ var ] # single value using BRICK classes request = self . compose_MDAL_dic ( point_type = point_type , site = site , start = start , end = end , var = var , agg = agg , window = window , aligned = aligned ) resp = self . m . query ( request ) if return_names : resp = self . replace_uuid_w_names ( resp ) return resp
Get thermostat data from MDAL .
297
9
233,997
def compose_MDAL_dic ( self , site , point_type , start , end , var , agg , window , aligned , points = None , return_names = False ) : # Convert time to UTC start = self . convert_to_utc ( start ) end = self . convert_to_utc ( end ) request = { } # Add Time Details - single set for one or multiple series request [ 'Time' ] = { 'Start' : start , 'End' : end , 'Window' : window , 'Aligned' : aligned } # Define Variables request [ "Variables" ] = { } request [ 'Composition' ] = [ ] request [ 'Aggregation' ] = { } if isinstance ( point_type , str ) : # if point_type is a string -> single type of point requested request [ "Variables" ] [ var ] = self . compose_BRICK_query ( point_type = point_type , site = site ) # pass one point type at the time request [ 'Composition' ] = [ var ] request [ 'Aggregation' ] [ var ] = [ agg ] elif isinstance ( point_type , list ) : # loop through all the point_types and create one section of the brick query at the time for idx , point in enumerate ( point_type ) : request [ "Variables" ] [ var [ idx ] ] = self . compose_BRICK_query ( point_type = point , site = site ) # pass one point type at the time request [ 'Composition' ] . append ( var [ idx ] ) if isinstance ( agg , str ) : # if agg is a string -> single type of aggregation requested request [ 'Aggregation' ] [ var [ idx ] ] = [ agg ] elif isinstance ( agg , list ) : # if agg is a list -> expected one agg per point request [ 'Aggregation' ] [ var [ idx ] ] = [ agg [ idx ] ] return request
Create dictionary for MDAL request .
433
7
233,998
def get_point_name ( self , context ) : metadata_table = self . parse_context ( context ) return metadata_table . apply ( self . strip_point_name , axis = 1 )
Get point name .
43
4
233,999
def replace_uuid_w_names ( self , resp ) : col_mapper = self . get_point_name ( resp . context ) [ "?point" ] . to_dict ( ) resp . df . rename ( columns = col_mapper , inplace = True ) return resp
Replace the uuid s with names .
64
9