idx int64 0 63k | question stringlengths 61 4.03k | target stringlengths 6 1.23k |
|---|---|---|
48,500 | def info ( self ) : status , n_datasets , n_file_attrs = _C . SDfileinfo ( self . _id ) _checkErr ( 'info' , status , "cannot execute" ) return n_datasets , n_file_attrs | Retrieve information about the SD interface . |
48,501 | def nametoindex ( self , sds_name ) : sds_idx = _C . SDnametoindex ( self . _id , sds_name ) _checkErr ( 'nametoindex' , sds_idx , 'non existent SDS' ) return sds_idx | Return the index number of a dataset given the dataset name . |
48,502 | def reftoindex ( self , sds_ref ) : sds_idx = _C . SDreftoindex ( self . _id , sds_ref ) _checkErr ( 'reftoindex' , sds_idx , 'illegal SDS ref number' ) return sds_idx | Returns the index number of a dataset given the dataset reference number . |
48,503 | def setfillmode ( self , fill_mode ) : if not fill_mode in [ SDC . FILL , SDC . NOFILL ] : raise HDF4Error ( "bad fill mode" ) old_mode = _C . SDsetfillmode ( self . _id , fill_mode ) _checkErr ( 'setfillmode' , old_mode , 'cannot execute' ) return old_mode | Set the fill mode for all the datasets in the file . |
48,504 | def select ( self , name_or_index ) : if isinstance ( name_or_index , type ( 1 ) ) : idx = name_or_index else : try : idx = self . nametoindex ( name_or_index ) except HDF4Error : raise HDF4Error ( "select: non-existent dataset" ) id = _C . SDselect ( self . _id , idx ) _checkErr ( 'select' , id , "cannot execute" ) return SDS ( self , id ) | Locate a dataset . |
48,505 | def attributes ( self , full = 0 ) : nsds , natts = self . info ( ) res = { } for n in range ( natts ) : a = self . attr ( n ) name , aType , nVal = a . info ( ) if full : res [ name ] = ( a . get ( ) , a . index ( ) , aType , nVal ) else : res [ name ] = a . get ( ) return res | Return a dictionnary describing every global attribute attached to the SD interface . |
48,506 | def datasets ( self ) : nDs = self . info ( ) [ 0 ] res = { } for n in range ( nDs ) : v = self . select ( n ) vName , vRank , vLen , vType , vAtt = v . info ( ) if vRank < 2 : vLen = [ vLen ] dimNames = [ ] dimLengths = [ ] for dimNum in range ( vRank ) : d = v . dim ( dimNum ) dimNames . append ( d . info ( ) [ 0 ] ) dimLengths . append ( vLen [ dimNum ] ) res [ vName ] = ( tuple ( dimNames ) , tuple ( dimLengths ) , vType , n ) return res | Return a dictionnary describing all the file datasets . |
48,507 | def endaccess ( self ) : status = _C . SDendaccess ( self . _id ) _checkErr ( 'endaccess' , status , "cannot execute" ) self . _id = None | Terminates access to the SDS . |
48,508 | def dim ( self , dim_index ) : id = _C . SDgetdimid ( self . _id , dim_index ) _checkErr ( 'dim' , id , 'invalid SDS identifier or dimension index' ) return SDim ( self , id , dim_index ) | Get an SDim instance given a dimension index number . |
48,509 | def get ( self , start = None , count = None , stride = None ) : try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) if isinstance ( dim_sizes , type ( 1 ) ) : dim_sizes = [ dim_sizes ] except HDF4Error : raise HDF4Error ( 'get : cannot execute' ) if start is None : start = [ 0 ] * rank elif isinstance ( start , type ( 1 ) ) : start = [ start ] if count is None : count = dim_sizes if count [ 0 ] == 0 : count [ 0 ] = 1 elif isinstance ( count , type ( 1 ) ) : count = [ count ] if stride is None : stride = [ 1 ] * rank elif isinstance ( stride , type ( 1 ) ) : stride = [ stride ] if len ( start ) != rank or len ( count ) != rank or len ( stride ) != rank : raise HDF4Error ( 'get : start, stride or count ' 'do not match SDS rank' ) for n in range ( rank ) : if start [ n ] < 0 or start [ n ] + ( abs ( count [ n ] ) - 1 ) * stride [ n ] >= dim_sizes [ n ] : raise HDF4Error ( 'get arguments violate ' 'the size (%d) of dimension %d' % ( dim_sizes [ n ] , n ) ) if not data_type in SDC . equivNumericTypes : raise HDF4Error ( 'get cannot currrently deal with ' 'the SDS data type' ) return _C . _SDreaddata_0 ( self . _id , data_type , start , count , stride ) | Read data from the dataset . |
48,510 | def set ( self , data , start = None , count = None , stride = None ) : try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) if isinstance ( dim_sizes , type ( 1 ) ) : dim_sizes = [ dim_sizes ] except HDF4Error : raise HDF4Error ( 'set : cannot execute' ) if start is None : start = [ 0 ] * rank elif isinstance ( start , type ( 1 ) ) : start = [ start ] if count is None : count = dim_sizes if count [ 0 ] == 0 : count [ 0 ] = 1 elif isinstance ( count , type ( 1 ) ) : count = [ count ] if stride is None : stride = [ 1 ] * rank elif isinstance ( stride , type ( 1 ) ) : stride = [ stride ] if len ( start ) != rank or len ( count ) != rank or len ( stride ) != rank : raise HDF4Error ( 'set : start, stride or count ' 'do not match SDS rank' ) unlimited = self . isrecord ( ) for n in range ( rank ) : ok = 1 if start [ n ] < 0 : ok = 0 elif n > 0 or not unlimited : if start [ n ] + ( abs ( count [ n ] ) - 1 ) * stride [ n ] >= dim_sizes [ n ] : ok = 0 if not ok : raise HDF4Error ( 'set arguments violate ' 'the size (%d) of dimension %d' % ( dim_sizes [ n ] , n ) ) if not data_type in SDC . equivNumericTypes : raise HDF4Error ( 'set cannot currrently deal ' 'with the SDS data type' ) _C . _SDwritedata_0 ( self . _id , data_type , start , count , data , stride ) | Write data to the dataset . |
48,511 | def info ( self ) : buf = _C . array_int32 ( _C . H4_MAX_VAR_DIMS ) status , sds_name , rank , data_type , n_attrs = _C . SDgetinfo ( self . _id , buf ) _checkErr ( 'info' , status , "cannot execute" ) dim_sizes = _array_to_ret ( buf , rank ) return sds_name , rank , dim_sizes , data_type , n_attrs | Retrieves information about the dataset . |
48,512 | def checkempty ( self ) : status , emptySDS = _C . SDcheckempty ( self . _id ) _checkErr ( 'checkempty' , status , 'invalid SDS identifier' ) return emptySDS | Determine whether the dataset is empty . |
48,513 | def ref ( self ) : sds_ref = _C . SDidtoref ( self . _id ) _checkErr ( 'idtoref' , sds_ref , 'illegal SDS identifier' ) return sds_ref | Get the reference number of the dataset . |
48,514 | def getcal ( self ) : status , cal , cal_error , offset , offset_err , data_type = _C . SDgetcal ( self . _id ) _checkErr ( 'getcal' , status , 'no calibration record' ) return cal , cal_error , offset , offset_err , data_type | Retrieve the SDS calibration coefficients . |
48,515 | def getdatastrs ( self ) : status , label , unit , format , coord_system = _C . SDgetdatastrs ( self . _id , 128 ) _checkErr ( 'getdatastrs' , status , 'cannot execute' ) return label , unit , format , coord_system | Retrieve the dataset standard string attributes . |
48,516 | def getrange ( self ) : try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'getrange : invalid SDS identifier' ) n_values = 1 convert = _array_to_ret if data_type == SDC . CHAR8 : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) convert = _array_to_str elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf1 = _C . array_int8 ( n_values ) buf2 = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf1 = _C . array_int16 ( n_values ) buf2 = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf1 = _C . array_uint16 ( n_values ) buf2 = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf1 = _C . array_int32 ( n_values ) buf2 = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf1 = _C . array_uint32 ( n_values ) buf2 = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf1 = _C . array_float32 ( n_values ) buf2 = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf1 = _C . array_float64 ( n_values ) buf2 = _C . array_float64 ( n_values ) else : raise HDF4Error ( "getrange: SDS has an illegal or " "unsupported type %d" % data ) status = _C . SDgetrange ( self . _id , buf1 , buf2 ) _checkErr ( 'getrange' , status , 'range not set' ) return convert ( buf2 , n_values ) , convert ( buf1 , n_values ) | Retrieve the dataset min and max values . |
48,517 | def setcal ( self , cal , cal_error , offset , offset_err , data_type ) : status = _C . SDsetcal ( self . _id , cal , cal_error , offset , offset_err , data_type ) _checkErr ( 'setcal' , status , 'cannot execute' ) | Set the dataset calibration coefficients . |
48,518 | def setdatastrs ( self , label , unit , format , coord_sys ) : status = _C . SDsetdatastrs ( self . _id , label , unit , format , coord_sys ) _checkErr ( 'setdatastrs' , status , 'cannot execute' ) | Set the dataset standard string type attributes . |
48,519 | def setfillvalue ( self , fill_val ) : try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'setfillvalue : cannot execute' ) n_values = 1 if data_type == SDC . CHAR8 : buf = _C . array_byte ( n_values ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "setfillvalue: SDS has an illegal or " "unsupported type %d" % data_type ) buf [ 0 ] = fill_val status = _C . SDsetfillvalue ( self . _id , buf ) _checkErr ( 'setfillvalue' , status , 'cannot execute' ) | Set the dataset fill value . |
48,520 | def setrange ( self , min , max ) : try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'setrange : cannot execute' ) n_values = 1 if data_type == SDC . CHAR8 : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf1 = _C . array_int8 ( n_values ) buf2 = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf1 = _C . array_int16 ( n_values ) buf2 = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf1 = _C . array_uint16 ( n_values ) buf2 = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf1 = _C . array_int32 ( n_values ) buf2 = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf1 = _C . array_uint32 ( n_values ) buf2 = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf1 = _C . array_float32 ( n_values ) buf2 = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf1 = _C . array_float64 ( n_values ) buf2 = _C . array_float64 ( n_values ) else : raise HDF4Error ( "SDsetrange: SDS has an illegal or " "unsupported type %d" % data_type ) buf1 [ 0 ] = max buf2 [ 0 ] = min status = _C . SDsetrange ( self . _id , buf1 , buf2 ) _checkErr ( 'setrange' , status , 'cannot execute' ) | Set the dataset min and max values . |
48,521 | def getcompress ( self ) : status , comp_type , value , v2 , v3 , v4 , v5 = _C . _SDgetcompress ( self . _id ) _checkErr ( 'getcompress' , status , 'no compression' ) if comp_type == SDC . COMP_NONE : return ( comp_type , ) elif comp_type == SDC . COMP_SZIP : return comp_type , value , v2 , v3 , v4 , v5 else : return comp_type , value | Retrieves info about dataset compression type and mode . |
48,522 | def setcompress ( self , comp_type , value = 0 , v2 = 0 ) : status = _C . _SDsetcompress ( self . _id , comp_type , value , v2 ) _checkErr ( 'setcompress' , status , 'cannot execute' ) | Compresses the dataset using a specified compression method . |
48,523 | def setexternalfile ( self , filename , offset = 0 ) : status = _C . SDsetexternalfile ( self . _id , filename , offset ) _checkErr ( 'setexternalfile' , status , 'execution error' ) | Store the dataset data in an external file . |
48,524 | def dimensions ( self , full = 0 ) : nDims , dimLen = self . info ( ) [ 1 : 3 ] if isinstance ( dimLen , int ) : dimLen = [ dimLen ] unlim = self . isrecord ( ) res = { } for n in range ( nDims ) : d = self . dim ( n ) name , k , scaleType , nAtt = d . info ( ) length = dimLen [ n ] if full : res [ name ] = ( length , n , unlim and n == 0 , scaleType , nAtt ) else : res [ name ] = length return res | Return a dictionnary describing every dataset dimension . |
48,525 | def info ( self ) : status , dim_name , dim_size , data_type , n_attrs = _C . SDdiminfo ( self . _id ) _checkErr ( 'info' , status , 'cannot execute' ) return dim_name , dim_size , data_type , n_attrs | Return info about the dimension instance . |
48,526 | def setname ( self , dim_name ) : status = _C . SDsetdimname ( self . _id , dim_name ) _checkErr ( 'setname' , status , 'cannot execute' ) | Set the dimension name . |
48,527 | def getscale ( self ) : status , dim_name , dim_size , data_type , n_attrs = _C . SDdiminfo ( self . _id ) _checkErr ( 'getscale' , status , 'cannot execute' ) if data_type == 0 : raise HDF4Error ( "no scale set on that dimension" ) if dim_size == 0 : dim_size = self . _sds . info ( ) [ 2 ] [ self . _index ] if data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( dim_size ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( dim_size ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( dim_size ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( dim_size ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( dim_size ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( dim_size ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( dim_size ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( dim_size ) else : raise HDF4Error ( "getscale: dimension has an " "illegal or unsupported type %d" % data_type ) status = _C . SDgetdimscale ( self . _id , buf ) _checkErr ( 'getscale' , status , 'cannot execute' ) return _array_to_ret ( buf , dim_size ) | Obtain the scale values along a dimension . |
48,528 | def setscale ( self , data_type , scale ) : try : n_values = len ( scale ) except : n_values = 1 info = self . _sds . info ( ) if info [ 1 ] == 1 : dim_size = info [ 2 ] else : dim_size = info [ 2 ] [ self . _index ] if n_values != dim_size : raise HDF4Error ( 'number of scale values (%d) does not match ' 'dimension size (%d)' % ( n_values , dim_size ) ) if data_type == SDC . CHAR8 : buf = _C . array_byte ( n_values ) scale = list ( scale ) for n in range ( n_values ) : scale [ n ] = ord ( scale [ n ] ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "setscale: illegal or usupported data_type" ) if n_values == 1 : buf [ 0 ] = scale else : for n in range ( n_values ) : buf [ n ] = scale [ n ] status = _C . SDsetdimscale ( self . _id , n_values , data_type , buf ) _checkErr ( 'setscale' , status , 'cannot execute' ) | Initialize the scale values along the dimension . |
48,529 | def getstrs ( self ) : status , label , unit , format = _C . SDgetdimstrs ( self . _id , 128 ) _checkErr ( 'getstrs' , status , 'cannot execute' ) return label , unit , format | Retrieve the dimension standard string attributes . |
48,530 | def setstrs ( self , label , unit , format ) : status = _C . SDsetdimstrs ( self . _id , label , unit , format ) _checkErr ( 'setstrs' , status , 'cannot execute' ) | Set the dimension standard string attributes . |
48,531 | def attach ( self , num_name , write = 0 ) : mode = write and 'w' or 'r' if isinstance ( num_name , str ) : num = self . find ( num_name ) else : num = num_name vd = _C . VSattach ( self . _hdf_inst . _id , num , mode ) if vd < 0 : _checkErr ( 'attach' , vd , 'cannot attach vdata' ) return VD ( self , vd ) | Locate an existing vdata or create a new vdata in the HDF file returning a VD instance . |
48,532 | def create ( self , name , fields ) : try : vd = self . attach ( - 1 , 1 ) vd . _name = name allNames = [ ] for name , type , order in fields : vd . fdefine ( name , type , order ) allNames . append ( name ) vd . setfields ( * allNames ) return vd except HDF4Error as msg : raise HDF4Error ( "error creating vdata (%s)" % msg ) | Create a new vdata setting its name and allocating its fields . |
48,533 | def next ( self , vRef ) : num = _C . VSgetid ( self . _hdf_inst . _id , vRef ) _checkErr ( 'next' , num , 'cannot get next vdata' ) return num | Get the reference number of the vdata following a given vdata . |
48,534 | def vdatainfo ( self , listAttr = 0 ) : lst = [ ] ref = - 1 while True : try : nxtRef = self . next ( ref ) except HDF4Error : break ref = nxtRef vdObj = self . attach ( ref ) if listAttr or not vdObj . _isattr : lst . append ( ( vdObj . _name , vdObj . _class , vdObj . _refnum , vdObj . _nrecs , vdObj . _nfields , vdObj . _nattrs , vdObj . _recsize , vdObj . _tag , vdObj . _interlace ) ) vdObj . detach ( ) return lst | Return info about all the file vdatas . |
48,535 | def storedata ( self , fieldName , values , data_type , vName , vClass ) : nrecs = len ( values ) if type ( values [ 0 ] ) in [ list , tuple ] : order = len ( values [ 0 ] ) newValues = [ ] for el in values : for e in el : newValues . append ( e ) values = newValues else : order = 1 n_values = nrecs * order if data_type == HC . CHAR8 : buf = _C . array_byte ( n_values ) values = list ( values ) for n in range ( n_values ) : values [ n ] = ord ( values [ n ] ) elif data_type in [ HC . UCHAR8 , HC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == HC . INT8 : buf = _C . array_int8 ( n_values ) values = list ( values ) for n in range ( n_values ) : v = values [ n ] if v >= 0 : v &= 0x7f else : v = abs ( v ) & 0x7f if v : v = 256 - v else : v = 128 values [ n ] = v elif data_type == HC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == HC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == HC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == HC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == HC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == HC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "storedata: illegal or unimplemented data_type" ) for n in range ( n_values ) : buf [ n ] = values [ n ] if order == 1 : vd = _C . VHstoredata ( self . _hdf_inst . _id , fieldName , buf , nrecs , data_type , vName , vClass ) else : vd = _C . VHstoredatam ( self . _hdf_inst . _id , fieldName , buf , nrecs , data_type , vName , vClass , order ) _checkErr ( 'storedata' , vd , 'cannot create vdata' ) return vd | Create and initialize a single field vdata returning the vdata reference number . |
48,536 | def field ( self , name_index ) : if isinstance ( name_index , str ) : status , index = _C . VSfindex ( self . _id , name_index ) _checkErr ( 'field' , status , "illegal field name: %s" % name_index ) else : n = _C . VFnfields ( self . _id ) _checkErr ( 'field' , n , 'cannot execute' ) index = name_index if index >= n : raise HDF4Error ( "field: illegal index number" ) return VDField ( self , index ) | Get a VDField instance representing a field of the vdata . |
48,537 | def seek ( self , recIndex ) : if recIndex > self . _nrecs - 1 : if recIndex == self . _nrecs : return self . seekend ( ) else : raise HDF4Error ( "attempt to seek past last record" ) n = _C . VSseek ( self . _id , recIndex ) _checkErr ( 'seek' , n , 'cannot seek' ) self . _offset = n return n | Seek to the beginning of the record identified by its record index . A succeeding read will load this record in memory . |
48,538 | def inquire ( self ) : status , nRecs , interlace , fldNames , size , vName = _C . VSinquire ( self . _id ) _checkErr ( 'inquire' , status , "cannot query vdata info" ) return nRecs , interlace , fldNames . split ( ',' ) , size , vName | Retrieve info about the vdata . |
48,539 | def fieldinfo ( self ) : lst = [ ] for n in range ( self . _nfields ) : fld = self . field ( n ) lst . append ( ( fld . _name , fld . _type , fld . _order , fld . _nattrs , fld . _index , fld . _esize , fld . _isize ) ) return lst | Retrieve info about all vdata fields . |
48,540 | def sizeof ( self , fields ) : if type ( fields ) in [ tuple , list ] : str = ',' . join ( fields ) else : str = fields n = _C . VSsizeof ( self . _id , str ) _checkErr ( 'sizeof' , n , "cannot retrieve field sizes" ) return n | Retrieve the size in bytes of the given fields . |
48,541 | def fexist ( self , fields ) : if type ( fields ) in [ tuple , list ] : str = ',' . join ( fields ) else : str = fields ret = _C . VSfexist ( self . _id , str ) if ret < 0 : return 0 else : return 1 | Check if a vdata contains a given set of fields . |
48,542 | def find ( self , name ) : try : att = self . attr ( name ) if att . _index is None : att = None except HDF4Error : att = None return att | Search the field for a given attribute . |
48,543 | def set ( self , data_type , values ) : try : n_values = len ( values ) except : values = [ values ] n_values = 1 if data_type == HC . CHAR8 : buf = _C . array_byte ( n_values ) values = list ( values ) for n in range ( n_values ) : if not isinstance ( values [ n ] , int ) : values [ n ] = ord ( values [ n ] ) elif data_type in [ HC . UCHAR8 , HC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == HC . INT8 : buf = _C . array_int8 ( n_values ) values = list ( values ) for n in range ( n_values ) : v = values [ n ] if v >= 0 : v &= 0x7f else : v = abs ( v ) & 0x7f if v : v = 256 - v else : v = 128 values [ n ] = v elif data_type == HC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == HC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == HC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == HC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == HC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == HC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "set: illegal or unimplemented data_type" ) for n in range ( n_values ) : buf [ n ] = values [ n ] status = _C . VSsetattr ( self . _vd_inst . _id , self . _fIndex , self . _name , data_type , n_values , buf ) _checkErr ( 'attr' , status , 'cannot execute' ) self . _index = _C . VSfindattr ( self . _vd_inst . _id , self . _fIndex , self . _name ) if self . _index < 0 : raise HDF4Error ( "set: error retrieving attribute index" ) | Set the attribute value . |
48,544 | def getlibversion ( ) : status , major_v , minor_v , release , info = _C . Hgetlibversion ( ) _checkErr ( 'getlibversion' , status , "cannot get lib version" ) return major_v , minor_v , release , info | Get the library version info . |
48,545 | def getfileversion ( self ) : status , major_v , minor_v , release , info = _C . Hgetfileversion ( self . _id ) _checkErr ( 'getfileversion' , status , "cannot get file version" ) return major_v , minor_v , release , info | Get file version info . |
48,546 | def colorize ( lead , num , color ) : if num != 0 and ANSIBLE_COLOR and color is not None : return "%s%s%-15s" % ( stringc ( lead , color ) , stringc ( "=" , color ) , stringc ( str ( num ) , color ) ) else : return "%s=%-4s" % ( lead , str ( num ) ) | Print lead = num in color |
48,547 | def timeline ( self , request , drip_id , into_past , into_future ) : from django . shortcuts import render , get_object_or_404 drip = get_object_or_404 ( Drip , id = drip_id ) shifted_drips = [ ] seen_users = set ( ) for shifted_drip in drip . drip . walk ( into_past = int ( into_past ) , into_future = int ( into_future ) + 1 ) : shifted_drip . prune ( ) shifted_drips . append ( { 'drip' : shifted_drip , 'qs' : shifted_drip . get_queryset ( ) . exclude ( id__in = seen_users ) } ) seen_users . update ( shifted_drip . get_queryset ( ) . values_list ( 'id' , flat = True ) ) return render ( request , 'drip/timeline.html' , locals ( ) ) | Return a list of people who should get emails . |
48,548 | def walk ( self , into_past = 0 , into_future = 0 ) : walked_range = [ ] for shift in range ( - into_past , into_future ) : kwargs = dict ( drip_model = self . drip_model , name = self . name , now_shift_kwargs = { 'days' : shift } ) walked_range . append ( self . __class__ ( ** kwargs ) ) return walked_range | Walk over a date range and create new instances of self with new ranges . |
48,549 | def run ( self ) : if not self . drip_model . enabled : return None self . prune ( ) count = self . send ( ) return count | Get the queryset prune sent people and send it . |
48,550 | def prune ( self ) : target_user_ids = self . get_queryset ( ) . values_list ( 'id' , flat = True ) exclude_user_ids = SentDrip . objects . filter ( date__lt = conditional_now ( ) , drip = self . drip_model , user__id__in = target_user_ids ) . values_list ( 'user_id' , flat = True ) self . _queryset = self . get_queryset ( ) . exclude ( id__in = exclude_user_ids ) | Do an exclude for all Users who have a SentDrip already . |
48,551 | def send ( self ) : if not self . from_email : self . from_email = getattr ( settings , 'DRIP_FROM_EMAIL' , settings . DEFAULT_FROM_EMAIL ) MessageClass = message_class_for ( self . drip_model . message_class ) count = 0 for user in self . get_queryset ( ) : message_instance = MessageClass ( self , user ) try : result = message_instance . message . send ( ) if result : SentDrip . objects . create ( drip = self . drip_model , user = user , from_email = self . from_email , from_email_name = self . from_email_name , subject = message_instance . subject , body = message_instance . body ) count += 1 except Exception as e : logging . error ( "Failed to send drip %s to user %s: %s" % ( self . drip_model . id , user , e ) ) return count | Send the message to each user on the queryset . |
48,552 | def angle ( self , other ) : return math . acos ( self . dot ( other ) / ( self . magnitude ( ) * other . magnitude ( ) ) ) | Return the angle to the vector other |
48,553 | def project ( self , other ) : n = other . normalized ( ) return self . dot ( n ) * n | Return one vector projected on the vector other |
48,554 | def rotate_around ( self , axis , theta ) : x , y , z = self . x , self . y , self . z u , v , w = axis . x , axis . y , axis . z r2 = u ** 2 + v ** 2 + w ** 2 r = math . sqrt ( r2 ) ct = math . cos ( theta ) st = math . sin ( theta ) / r dt = ( u * x + v * y + w * z ) * ( 1 - ct ) / r2 return Vector3 ( ( u * dt + x * ct + ( - w * y + v * z ) * st ) , ( v * dt + y * ct + ( w * x - u * z ) * st ) , ( w * dt + z * ct + ( - v * x + u * y ) * st ) ) | Return the vector rotated around axis through angle theta . |
48,555 | def preparedir ( target_dir , remove_content = True ) : if os . path . isdir ( target_dir ) : if remove_content : nukedir ( target_dir , False ) return True else : try : os . makedirs ( target_dir ) return True except Exception as e : print ( "Failed to create folder: %s\n%s" % ( target_dir , e ) ) return False | Prepare a folder for analysis . |
48,556 | def nukedir ( target_dir , rmdir = False ) : d = os . path . normpath ( target_dir ) if not os . path . isdir ( d ) : return files = os . listdir ( d ) for f in files : if f == '.' or f == '..' : continue path = os . path . join ( d , f ) if os . path . isdir ( path ) : nukedir ( path ) else : try : os . remove ( path ) except Exception : print ( "Failed to remove %s" % path ) if rmdir : try : os . rmdir ( d ) except Exception : print ( "Failed to remove %s" % d ) | Delete all the files inside target_dir . |
48,557 | def write_to_file_by_name ( folder , fname , data , mkdir = False ) : if not os . path . isdir ( folder ) : if mkdir : preparedir ( folder ) else : created = preparedir ( folder , False ) if not created : raise ValueError ( "Failed to find %s." % folder ) file_path = os . path . join ( folder , fname ) with open ( file_path , writemode ) as outf : try : outf . write ( str ( data ) ) return file_path except Exception as e : raise IOError ( "Failed to write %s to file:\n\t%s" % ( fname , str ( e ) ) ) | Write a string of data to file by filename and folder . |
48,558 | def copy_files_to_folder ( files , target_folder , overwrite = True ) : if not files : return [ ] for f in files : target = os . path . join ( target_folder , os . path . split ( f ) [ - 1 ] ) if target == f : return target if os . path . exists ( target ) : if overwrite : try : os . remove ( target ) except Exception : raise IOError ( "Failed to remove %s" % f ) else : shutil . copy ( f , target ) else : continue else : print ( 'Copying %s to %s' % ( os . path . split ( f ) [ - 1 ] , os . path . normpath ( target_folder ) ) ) shutil . copy ( f , target ) return [ os . path . join ( target_folder , os . path . split ( f ) [ - 1 ] ) for f in files ] | Copy a list of files to a new target folder . |
48,559 | def bat_to_sh ( file_path ) : sh_file = file_path [ : - 4 ] + '.sh' with open ( file_path , 'rb' ) as inf , open ( sh_file , 'wb' ) as outf : outf . write ( '#!/usr/bin/env bash\n\n' ) for line in inf : if line . strip ( ) : continue else : break for line in inf : if line . startswith ( 'echo' ) : continue modified_line = line . replace ( 'c:\\radiance\\bin\\' , '' ) . replace ( '\\' , '/' ) outf . write ( modified_line ) print ( 'bash file is created at:\n\t%s' % sh_file ) st = os . stat ( sh_file ) os . chmod ( sh_file , st . st_mode | 0o111 ) return sh_file | Convert honeybee . bat file to . sh file . |
48,560 | def _download_py2 ( link , path , __hdr__ ) : try : req = urllib2 . Request ( link , headers = __hdr__ ) u = urllib2 . urlopen ( req ) except Exception as e : raise Exception ( ' Download failed with the error:\n{}' . format ( e ) ) with open ( path , 'wb' ) as outf : for l in u : outf . write ( l ) u . close ( ) | Download a file from a link in Python 2 . |
48,561 | def _download_py3 ( link , path , __hdr__ ) : try : req = urllib . request . Request ( link , headers = __hdr__ ) u = urllib . request . urlopen ( req ) except Exception as e : raise Exception ( ' Download failed with the error:\n{}' . format ( e ) ) with open ( path , 'wb' ) as outf : for l in u : outf . write ( l ) u . close ( ) | Download a file from a link in Python 3 . |
48,562 | def download_file_by_name ( url , target_folder , file_name , mkdir = False ) : __hdr__ = { 'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 ' '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11' , 'Accept' : 'text/html,application/xhtml+xml,' 'application/xml;q=0.9,*/*;q=0.8' , 'Accept-Charset' : 'ISO-8859-1,utf-8;q=0.7,*;q=0.3' , 'Accept-Encoding' : 'none' , 'Accept-Language' : 'en-US,en;q=0.8' , 'Connection' : 'keep-alive' } if not os . path . isdir ( target_folder ) : if mkdir : preparedir ( target_folder ) else : created = preparedir ( target_folder , False ) if not created : raise ValueError ( "Failed to find %s." % target_folder ) file_path = os . path . join ( target_folder , file_name ) if ( sys . version_info < ( 3 , 0 ) ) : _download_py2 ( url , file_path , __hdr__ ) else : _download_py3 ( url , file_path , __hdr__ ) | Download a file to a directory . |
48,563 | def unzip_file ( source_file , dest_dir = None , mkdir = False ) : if dest_dir is None : dest_dir , fname = os . path . split ( source_file ) elif not os . path . isdir ( dest_dir ) : if mkdir : preparedir ( dest_dir ) else : created = preparedir ( dest_dir , False ) if not created : raise ValueError ( "Failed to find %s." % dest_dir ) with zipfile . ZipFile ( source_file ) as zf : for member in zf . infolist ( ) : words = member . filename . split ( '\\' ) for word in words [ : - 1 ] : drive , word = os . path . splitdrive ( word ) head , word = os . path . split ( word ) if word in ( os . curdir , os . pardir , '' ) : continue dest_dir = os . path . join ( dest_dir , word ) zf . extract ( member , dest_dir ) | Unzip a compressed file . |
def csv_to_matrix(csv_file_path):
    """Load a CSV file into a Python matrix of strings.

    Note: each row's last field keeps the trailing newline character,
    since rows are split on commas only.
    """
    with open(csv_file_path) as csv_data_file:
        return [line.split(',') for line in csv_data_file]
def csv_to_num_matrix(csv_file_path):
    """Load a CSV file consisting only of numbers into a matrix of floats."""
    with open(csv_file_path) as csv_data_file:
        return [[float(field) for field in line.split(',')]
                for line in csv_data_file]
def from_json(cls, data):
    """Create a STAT object from a json dictionary.

    Args:
        data: Dictionary with at least a "location" key; every other key
            is optional and is defaulted to None, [] or {} when absent.
    """
    stat_ob = cls(None)
    # keys that default to None / empty list / empty dict when absent
    option_keys_none = ('ashrae_climate_zone', 'koppen_climate_zone',
                        'extreme_cold_week', 'extreme_hot_week',
                        'standard_pressure_at_elev')
    option_keys_list = ('monthly_db_50', 'monthly_wb_50', 'monthly_db_range_50',
                        'monthly_wb_range_50', 'monthly_db_100', 'monthly_wb_100',
                        'monthly_db_20', 'monthly_wb_20', 'monthly_db_04',
                        'monthly_wb_04', 'monthly_wind', 'monthly_wind_dirs',
                        'monthly_tau_beam', 'monthly_tau_diffuse')
    option_keys_dict = ('typical_weeks', 'heating_dict', 'cooling_dict')
    assert 'location' in data, 'Required key "location" is missing!'
    # fill in defaults for any missing optional keys
    for key in option_keys_none:
        if key not in data:
            data[key] = None
    for key in option_keys_list:
        if key not in data:
            data[key] = []
    for key in option_keys_dict:
        if key not in data:
            data[key] = {}
    stat_ob._location = Location.from_json(data['location'])
    stat_ob._ashrae_climate_zone = data['ashrae_climate_zone']
    stat_ob._koppen_climate_zone = data['koppen_climate_zone']
    stat_ob._extreme_cold_week = AnalysisPeriod.from_json(
        data['extreme_cold_week']) if data['extreme_cold_week'] else None
    stat_ob._extreme_hot_week = AnalysisPeriod.from_json(
        data['extreme_hot_week']) if data['extreme_hot_week'] else None
    # typical week entries may be a single AnalysisPeriod dict or a list of them
    stat_ob._typical_weeks = {}
    for key, val in data['typical_weeks'].items():
        if isinstance(val, list):
            stat_ob._typical_weeks[key] = [AnalysisPeriod.from_json(v) for v in val]
        else:
            stat_ob._typical_weeks[key] = AnalysisPeriod.from_json(val)
    stat_ob._winter_des_day_dict = data['heating_dict']
    stat_ob._summer_des_day_dict = data['cooling_dict']
    # monthly design condition lists are stored as plain lists of numbers
    stat_ob._monthly_db_50 = data['monthly_db_50']
    stat_ob._monthly_wb_50 = data['monthly_wb_50']
    stat_ob._monthly_db_range_50 = data['monthly_db_range_50']
    stat_ob._monthly_wb_range_50 = data['monthly_wb_range_50']
    stat_ob._monthly_db_100 = data['monthly_db_100']
    stat_ob._monthly_wb_100 = data['monthly_wb_100']
    stat_ob._monthly_db_20 = data['monthly_db_20']
    stat_ob._monthly_wb_20 = data['monthly_wb_20']
    stat_ob._monthly_db_04 = data['monthly_db_04']
    stat_ob._monthly_wb_04 = data['monthly_wb_04']
    stat_ob._monthly_wind = data['monthly_wind']
    stat_ob._monthly_wind_dirs = data['monthly_wind_dirs']
    stat_ob._stand_press_at_elev = data['standard_pressure_at_elev']
    stat_ob._monthly_tau_beam = data['monthly_tau_beam']
    stat_ob._monthly_tau_diffuse = data['monthly_tau_diffuse']
    return stat_ob
def monthly_cooling_design_days_050(self):
    """A list of 12 objects representing monthly 5.0% cooling design days."""
    if self.monthly_found is False or self._monthly_db_50 == [] \
            or self._monthly_wb_50 == []:
        return []
    db_conds = [DryBulbCondition(temp, rng) for temp, rng in
                zip(self._monthly_db_50, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_50]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '5% Cooling Design Day for {}'.format(self._months[mon]),
        'SummerDesignDay', self._location, db_conds[mon], hu_conds[mon],
        ws_conds[mon], sky_conds[mon]) for mon in range(12)]
def monthly_cooling_design_days_100(self):
    """A list of 12 objects representing monthly 10.0% cooling design days."""
    if self.monthly_found is False or self._monthly_db_100 == [] \
            or self._monthly_wb_100 == []:
        return []
    # NOTE(review): the 5% dry bulb range is reused for all percentiles here
    # (mirrors the original); presumably only one range exists in the file
    db_conds = [DryBulbCondition(temp, rng) for temp, rng in
                zip(self._monthly_db_100, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_100]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '10% Cooling Design Day for {}'.format(self._months[mon]),
        'SummerDesignDay', self._location, db_conds[mon], hu_conds[mon],
        ws_conds[mon], sky_conds[mon]) for mon in range(12)]
def monthly_cooling_design_days_020(self):
    """A list of 12 objects representing monthly 2.0% cooling design days."""
    if self.monthly_found is False or self._monthly_db_20 == [] \
            or self._monthly_wb_20 == []:
        return []
    db_conds = [DryBulbCondition(temp, rng) for temp, rng in
                zip(self._monthly_db_20, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_20]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '2% Cooling Design Day for {}'.format(self._months[mon]),
        'SummerDesignDay', self._location, db_conds[mon], hu_conds[mon],
        ws_conds[mon], sky_conds[mon]) for mon in range(12)]
def monthly_cooling_design_days_004(self):
    """A list of 12 objects representing monthly 0.4% cooling design days."""
    if self.monthly_found is False or self._monthly_db_04 == [] \
            or self._monthly_wb_04 == []:
        return []
    db_conds = [DryBulbCondition(temp, rng) for temp, rng in
                zip(self._monthly_db_04, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_04]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '0.4% Cooling Design Day for {}'.format(self._months[mon]),
        'SummerDesignDay', self._location, db_conds[mon], hu_conds[mon],
        ws_conds[mon], sky_conds[mon]) for mon in range(12)]
def monthly_wind_conditions(self):
    """A list of 12 monthly wind conditions that are used on the design days."""
    pairs = zip(self._monthly_wind, self.monthly_wind_dirs)
    return [WindCondition(speed, direction) for speed, direction in pairs]
def monthly_wind_dirs(self):
    """A list of prevailing wind directions for each month."""
    prevailing = []
    # transpose so each tuple holds the per-direction values for one month
    for month_vals in zip(*self._monthly_wind_dirs):
        peak = month_vals.index(max(month_vals))
        prevailing.append(self._wind_dirs[peak])
    return prevailing
def monthly_clear_sky_conditions(self):
    """A list of 12 monthly clear sky conditions that are used on the design days.

    Falls back to the original ASHRAE clear sky for all 12 months when no
    monthly tau values are present.
    """
    # bug fix: the original used "is []", which is always False because a
    # fresh list literal is a distinct object; "== []" is the intended
    # emptiness test, so the fallback branch was previously unreachable
    if self._monthly_tau_diffuse == [] or self._monthly_tau_beam == []:
        return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
    return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in zip(
        list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)]
def to_json(self):
    """Convert the STAT object to a dictionary."""
    def jsonify_dict(base_dict):
        # serialize a dict whose values are objects (or lists of objects)
        # that each expose their own to_json method
        new_dict = {}
        for key, val in base_dict.items():
            if isinstance(val, list):
                new_dict[key] = [v.to_json() for v in val]
            else:
                new_dict[key] = val.to_json()
        return new_dict
    return {
        'location': self.location.to_json(),
        'ashrae_climate_zone': self.ashrae_climate_zone,
        'koppen_climate_zone': self.koppen_climate_zone,
        'extreme_cold_week': self.extreme_cold_week.to_json()
        if self.extreme_cold_week else None,
        # bug fix: the original guarded this on extreme_cold_week, which
        # raised AttributeError / dropped data when the two weeks differed
        'extreme_hot_week': self.extreme_hot_week.to_json()
        if self.extreme_hot_week else None,
        'typical_weeks': jsonify_dict(self._typical_weeks),
        'heating_dict': self._winter_des_day_dict,
        'cooling_dict': self._summer_des_day_dict,
        "monthly_db_50": self._monthly_db_50,
        "monthly_wb_50": self._monthly_wb_50,
        "monthly_db_range_50": self._monthly_db_range_50,
        "monthly_wb_range_50": self._monthly_wb_range_50,
        "monthly_db_100": self._monthly_db_100,
        "monthly_wb_100": self._monthly_wb_100,
        "monthly_db_20": self._monthly_db_20,
        "monthly_wb_20": self._monthly_wb_20,
        "monthly_db_04": self._monthly_db_04,
        "monthly_wb_04": self._monthly_wb_04,
        "monthly_wind": self._monthly_wind,
        "monthly_wind_dirs": self._monthly_wind_dirs,
        "standard_pressure_at_elev": self.standard_pressure_at_elev,
        "monthly_tau_beam": self.monthly_tau_beam,
        "monthly_tau_diffuse": self.monthly_tau_diffuse
    }
def from_json(cls, data):
    """Create a data type from a dictionary.

    Args:
        data: Dictionary with required keys "name" and "data_type";
            GenericType additionally requires "base_unit".

    Raises:
        ValueError: If "data_type" is not a recognized type name.
    """
    assert 'name' in data, 'Required keyword "name" is missing!'
    assert 'data_type' in data, 'Required keyword "data_type" is missing!'
    # lazily build the enumeration of known types on first use
    if cls._type_enumeration is None:
        cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
    if data['data_type'] == 'GenericType':
        assert 'base_unit' in data, \
            'Keyword "base_unit" is missing and is required for GenericType.'
        return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
    elif data['data_type'] in cls._type_enumeration._TYPES:
        clss = cls._type_enumeration._TYPES[data['data_type']]
        if data['data_type'] == data['name'].title().replace(' ', ''):
            # the name is just the class name itself; a plain instance suffices
            return clss()
        else:
            # a custom display name was given; override the default name
            instance = clss()
            instance._name = data['name']
            return instance
    else:
        raise ValueError(
            'Data Type {} could not be recognized'.format(data['data_type']))
def is_unit_acceptable(self, unit, raise_exception=True):
    """Check if a certain unit is acceptable for the data type.

    Args:
        unit: Unit abbreviation to check against this type's units list.
        raise_exception: If True (default), raise ValueError on failure
            instead of returning False.
    """
    acceptable = unit in self.units
    if not acceptable and raise_exception:
        raise ValueError(
            '{0} is not an acceptable unit type for {1}. '
            'Choose from the following: {2}'.format(
                unit, self.__class__.__name__, self.units))
    return acceptable
48,577 | def _is_numeric ( self , values ) : if len ( values ) > 0 : assert isinstance ( values [ 0 ] , ( float , int ) ) , "values must be numbers to perform math operations. Got {}" . format ( type ( values [ 0 ] ) ) return True | Check to be sure values are numbers before doing numerical operations . |
def _to_unit_base(self, base_unit, values, unit, from_unit):
    """Return values in a given unit given the input from_unit.

    Converts from_unit -> base_unit -> unit by eval-ing the matching
    private _X_to_Y converter methods defined on this data type.
    """
    self._is_numeric(values)
    namespace = {'self': self, 'values': values}
    if not from_unit == base_unit:
        self.is_unit_acceptable(from_unit, True)
        # NOTE(review): eval builds the converter call from unit names; this
        # is only safe while unit strings come from the trusted units lists
        statement = '[self._{}_to_{}(val) for val in values]'.format(
            self._clean(from_unit), self._clean(base_unit))
        values = eval(statement, namespace)
        namespace['values'] = values
    if not unit == base_unit:
        self.is_unit_acceptable(unit, True)
        statement = '[self._{}_to_{}(val) for val in values]'.format(
            self._clean(base_unit), self._clean(unit))
        values = eval(statement, namespace)
    return values
def name(self):
    """The data type name, derived from the class name when no override is set."""
    if self._name is not None:
        return self._name
    # insert a space before each capital that follows a word character,
    # e.g. "DryBulbTemperature" -> "Dry Bulb Temperature"
    return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
def from_json(cls, data):
    """Create a header from a dictionary.

    Args:
        data: Dictionary with a required "data_type" key and optional
            "unit", "analysis_period" and "metadata" keys (defaulted to
            None when absent).
    """
    assert 'data_type' in data, 'Required keyword "data_type" is missing!'
    # fill in None for any missing optional keys
    keys = ('data_type', 'unit', 'analysis_period', 'metadata')
    for key in keys:
        if key not in data:
            data[key] = None
    data_type = DataTypeBase.from_json(data['data_type'])
    ap = AnalysisPeriod.from_json(data['analysis_period'])
    return cls(data_type, data['unit'], ap, data['metadata'])
def duplicate(self):
    """Return a copy of the header."""
    period_copy = None
    if self.analysis_period:
        period_copy = self.analysis_period.duplicate()
    return self.__class__(
        self.data_type, self.unit, period_copy, deepcopy(self.metadata))
def to_tuple(self):
    """Return the Ladybug header fields as a tuple."""
    fields = (self.data_type, self.unit, self.analysis_period, self.metadata)
    return fields
def to_json(self):
    """Return the header as a dictionary."""
    if self.analysis_period:
        period = self.analysis_period.to_json()
    else:
        period = None
    return {
        'data_type': self.data_type.to_json(),
        'unit': self.unit,
        'analysis_period': period,
        'metadata': self.metadata
    }
def ashrae_clear_sky(altitudes, month, sky_clearness=1):
    """Calculate solar flux for an original ASHRAE Clear Sky.

    Args:
        altitudes: List of solar altitudes in degrees.
        month: Month of the year (1-12), used to pick the A/B coefficients.
        sky_clearness: Multiplier applied to both outputs (default 1).

    Returns:
        Tuple of (dir_norm_rad, dif_horiz_rad) lists, zero where the sun
        is below the horizon or the exponential overflows.
    """
    MONTHLY_A = [1202, 1187, 1164, 1130, 1106, 1092, 1093, 1107, 1136,
                 1166, 1190, 1204]
    MONTHLY_B = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185, 0.186, 0.182,
                 0.165, 0.152, 0.144, 0.141]
    # hoist the month lookup out of the loop
    coeff_a = MONTHLY_A[month - 1]
    coeff_b = MONTHLY_B[month - 1]
    dir_norm_rad = []
    dif_horiz_rad = []
    for alt in altitudes:
        if alt <= 0:
            dir_norm_rad.append(0)
            dif_horiz_rad.append(0)
            continue
        try:
            dir_norm = coeff_a / (math.exp(
                coeff_b / (math.sin(math.radians(alt)))))
            diff_horiz = 0.17 * dir_norm * math.sin(math.radians(alt))
            dir_norm_rad.append(dir_norm * sky_clearness)
            dif_horiz_rad.append(diff_horiz * sky_clearness)
        except OverflowError:
            # very low altitudes overflow the exponential
            dir_norm_rad.append(0)
            dif_horiz_rad.append(0)
    return dir_norm_rad, dif_horiz_rad
def zhang_huang_solar(alt, cloud_cover, relative_humidity, dry_bulb_present,
                      dry_bulb_t3_hrs, wind_speed, irr_0=1355):
    """Calculate global horizontal solar irradiance using the Zhang-Huang model.

    Args:
        alt: Solar altitude in degrees.
        cloud_cover: Cloud cover in tenths (0-10).
        relative_humidity: Relative humidity in percent.
        dry_bulb_present: Current dry bulb temperature.
        dry_bulb_t3_hrs: Dry bulb temperature 3 hours earlier.
        wind_speed: Wind speed.
        irr_0: Irradiance constant (default 1355).

    Returns:
        Global horizontal irradiance; 0 when the sun is below the horizon
        or the regression yields a negative value.
    """
    # regression coefficients of the Zhang-Huang model
    C0 = 0.5598
    C1 = 0.4982
    C2 = -0.6762
    C3 = 0.02842
    C4 = -0.00317
    C5 = 0.014
    D_COEFF = -17.853
    K_COEFF = 0.843
    if alt <= 0:
        return 0
    sin_alt = math.sin(math.radians(alt))
    cc = cloud_cover / 10.0
    delta_t = dry_bulb_present - dry_bulb_t3_hrs
    glob_ir = ((irr_0 * sin_alt *
                (C0 + (C1 * cc) + (C2 * cc ** 2) + (C3 * delta_t) +
                 (C4 * relative_humidity) + (C5 * wind_speed))) +
               D_COEFF) / K_COEFF
    # clamp negative regression output to zero
    return max(glob_ir, 0)
def zhang_huang_solar_split(altitudes, doys, cloud_cover, relative_humidity,
                            dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
                            atm_pressure, use_disc=False):
    """Calculate direct and diffuse solar irradiance using the Zhang-Huang model.

    Args:
        altitudes: List of solar altitudes in degrees.
        doys: List of days of the year matching the altitudes.
        cloud_cover, relative_humidity, dry_bulb_present, dry_bulb_t3_hrs,
            wind_speed: Hourly weather value lists fed to zhang_huang_solar.
        atm_pressure: List of atmospheric pressures.
        use_disc: True to split irradiance with the disc model; False
            (default) to use the dirint model.

    Returns:
        Tuple of (dir_norm_rad, dif_horiz_rad) lists.
    """
    # first compute global horizontal irradiance for every hour
    glob_ir = []
    for i in range(len(altitudes)):
        ghi = zhang_huang_solar(altitudes[i], cloud_cover[i],
                                relative_humidity[i], dry_bulb_present[i],
                                dry_bulb_t3_hrs[i], wind_speed[i])
        glob_ir.append(ghi)
    if use_disc is False:
        # dirint path: needs dew point temperatures for delta-kt-prime
        temp_dew = [dew_point_from_db_rh(dry_bulb_present[i],
                                         relative_humidity[i])
                    for i in range(len(glob_ir))]
        dir_norm_rad = dirint(glob_ir, altitudes, doys, atm_pressure,
                              use_delta_kt_prime=True, temp_dew=temp_dew)
        # diffuse = global minus the horizontal projection of direct
        dif_horiz_rad = [glob_ir[i] -
                         (dir_norm_rad[i] *
                          math.sin(math.radians(altitudes[i])))
                         for i in range(len(glob_ir))]
    else:
        # disc path: per-hour direct normal derived from global horizontal
        dir_norm_rad = []
        dif_horiz_rad = []
        for i in range(len(glob_ir)):
            dni, kt, am = disc(glob_ir[i], altitudes[i], doys[i],
                               atm_pressure[i])
            dhi = glob_ir[i] - (dni * math.sin(math.radians(altitudes[i])))
            dir_norm_rad.append(dni)
            dif_horiz_rad.append(dhi)
    return dir_norm_rad, dif_horiz_rad
def calc_horizontal_infrared(sky_cover, dry_bulb, dew_point):
    """Calculate horizontal infrared radiation intensity.

    Args:
        sky_cover: Opaque sky cover in tenths (0-10).
        dry_bulb: Dry bulb temperature in Celsius.
        dew_point: Dew point temperature in Celsius.

    Returns:
        Horizontal infrared radiation intensity.
    """
    SIGMA = 5.6697e-8  # Stefan-Boltzmann constant
    db_k = dry_bulb + 273.15
    dp_k = dew_point + 273.15
    # clear-sky emissivity scaled by a cubic polynomial in sky cover
    sky_emiss = (0.787 + (0.764 * math.log(dp_k / 273.15))) * \
        (1 + (0.022 * sky_cover) - (0.0035 * (sky_cover ** 2)) +
         (0.00028 * (sky_cover ** 3)))
    return sky_emiss * SIGMA * (db_k ** 4)
def set_domain(self, values):
    """Set domain of the colors based on min and max of a list of values.

    Any 'min'/'max' placeholders in the current domain are replaced by the
    smallest/largest value found after flattening the input.
    """
    sorted_vals = sorted(flatten(values))
    low = sorted_vals[0]
    high = sorted_vals[-1]
    self.domain = tuple(low if d == 'min' else d for d in self.domain)
    self.domain = tuple(high if d == 'max' else d for d in self.domain)
def timestep_text(self):
    """Return a text string representing the timestep of the collection."""
    timestep = self.header.analysis_period.timestep
    if timestep == 1:
        return 'Hourly'
    return '{} Minute'.format(int(60 / timestep))
def moys_dict(self):
    """Return a dictionary of this collection's values keyed by minute-of-year."""
    return {dt.moy: val for val, dt in zip(self.values, self.datetimes)}
def filter_by_analysis_period(self, analysis_period):
    """Filter a Data Collection based on an analysis period.

    Args:
        analysis_period: The AnalysisPeriod whose minutes-of-year are kept.

    Returns:
        A new filtered collection whose header carries the analysis period.
    """
    self._check_analysis_period(analysis_period)
    filtered = self.filter_by_moys(analysis_period.moys)
    filtered.header._analysis_period = analysis_period
    return filtered
def group_by_month_per_hour(self):
    """Return a dict of this collection's values grouped by (month, hour) keys.

    Every (month, hour) combination is present, even when its list is empty.
    """
    grouped = OrderedDict()
    for month in range(1, 13):
        for hour in range(24):
            grouped[(month, hour)] = []
    for val, dt in zip(self.values, self.datetimes):
        grouped[(dt.month, dt.hour)].append(val)
    return grouped
def interpolate_holes(self):
    """Linearly interpolate over holes in this collection to make it continuous.

    Returns:
        An HourlyContinuousCollection spanning the whole analysis period.
    """
    assert self.validated_a_period is True, 'validated_a_period property must be' \
        ' True to use interpolate_holes(). Run validate_analysis_period().'
    mins_per_step = int(60 / self.header.analysis_period.timestep)
    new_datetimes = self.header.analysis_period.datetimes
    new_values = []
    i = 0
    # fill a hole at the start by repeating the first known value
    if new_datetimes[0] != self.datetimes[0]:
        n_steps = int(
            (self.datetimes[0].moy - new_datetimes[0].moy) / mins_per_step)
        new_values.extend([self._values[0]] * n_steps)
        i = n_steps - 1
    # walk the full datetime grid, interpolating across each interior hole
    for j in xrange(len(self._values)):
        if new_datetimes[i] == self.datetimes[j]:
            new_values.append(self._values[j])
            i += 1
        else:
            n_steps = int(
                (self.datetimes[j].moy - new_datetimes[i].moy) / mins_per_step)
            intp_vals = self._xxrange(
                self._values[j - 1], self._values[j], n_steps)
            new_values.extend(list(intp_vals)[1:] + [self._values[j]])
            i += n_steps
    # fill a hole at the end by repeating the last known value
    if len(new_values) != len(new_datetimes):
        n_steps = len(new_datetimes) - len(new_values)
        new_values.extend([self._values[-1]] * n_steps)
    return HourlyContinuousCollection(self.header.duplicate(), new_values)
def cull_to_timestep(self, timestep=1):
    """Get a collection with only datetimes that fit a timestep.

    Args:
        timestep: Target timestep; must be one of the valid timesteps of
            the analysis period (default 1).
    """
    valid_s = self.header.analysis_period.VALIDTIMESTEPS.keys()
    assert timestep in valid_s, \
        'timestep {} is not valid. Choose from: {}'.format(timestep, valid_s)
    new_ap, new_values, new_datetimes = self._timestep_cull(timestep)
    culled_header = self.header.duplicate()
    culled_header._analysis_period = new_ap
    collection = HourlyDiscontinuousCollection(
        culled_header, new_values, new_datetimes)
    collection._validated_a_period = True
    return collection
def convert_to_culled_timestep(self, timestep=1):
    """Mutate this collection so only datetimes that fit the timestep remain.

    Args:
        timestep: Target timestep; must be one of the valid timesteps of
            the analysis period (default 1).
    """
    valid_s = self.header.analysis_period.VALIDTIMESTEPS.keys()
    assert timestep in valid_s, \
        'timestep {} is not valid. Choose from: {}'.format(timestep, valid_s)
    new_ap, new_values, new_datetimes = self._timestep_cull(timestep)
    self.header._analysis_period = new_ap
    self._values = new_values
    self._datetimes = new_datetimes
48,596 | def _xxrange ( self , start , end , step_count ) : _step = ( end - start ) / float ( step_count ) return ( start + ( i * _step ) for i in xrange ( int ( step_count ) ) ) | Generate n values between start and end . |
48,597 | def _filter_by_moys_slow ( self , moys ) : _filt_values = [ ] _filt_datetimes = [ ] for i , d in enumerate ( self . datetimes ) : if d . moy in moys : _filt_datetimes . append ( d ) _filt_values . append ( self . _values [ i ] ) return _filt_values , _filt_datetimes | Filter the Data Collection with a slow method that always works . |
def _timestep_cull(self, timestep):
    """Cull out values whose datetimes do not align with a timestep.

    Returns:
        Tuple of (new AnalysisPeriod, culled values, culled datetimes).
    """
    mins_per_step = int(60 / timestep)
    new_datetimes = []
    new_values = []
    for idx, date_t in enumerate(self.datetimes):
        # keep only datetimes that land exactly on the timestep grid
        if date_t.moy % mins_per_step == 0:
            new_datetimes.append(date_t)
            new_values.append(self.values[idx])
    a_per = self.header.analysis_period
    new_ap = AnalysisPeriod(
        a_per.st_month, a_per.st_day, a_per.st_hour, a_per.end_month,
        a_per.end_day, a_per.end_hour, timestep, a_per.is_leap_year)
    return new_ap, new_values, new_datetimes
def _time_interval_operation(self, interval, operation, percentile=0):
    """Get a collection of a certain time interval with a given math operation.

    Args:
        interval: 'monthly', 'daily' or 'monthlyperhour'.
        operation: 'average', 'total' or a percentile operation.
        percentile: Number between 0 and 100, used when operation is a
            percentile.

    Raises:
        ValueError: If interval is not one of the recognized values.
    """
    # pick the aggregation function
    if operation == 'average':
        funct = self._average
    elif operation == 'total':
        funct = self._total
    else:
        assert 0 <= percentile <= 100, \
            'percentile must be between 0 and 100. Got {}'.format(percentile)
        funct = self._get_percentile_function(percentile)
    # group the values by the requested interval
    if interval == 'monthly':
        data_dict = self.group_by_month()
        dates = self.header.analysis_period.months_int
    elif interval == 'daily':
        data_dict = self.group_by_day()
        dates = self.header.analysis_period.doys_int
    elif interval == 'monthlyperhour':
        data_dict = self.group_by_month_per_hour()
        dates = self.header.analysis_period.months_per_hour
    else:
        raise ValueError('Invalid input value for interval: {}'.format(interval))
    # aggregate each non-empty group
    new_data, d_times = [], []
    for i in dates:
        vals = data_dict[i]
        if vals != []:
            new_data.append(funct(vals))
            d_times.append(i)
    # record the applied operation on the new header's metadata
    new_header = self.header.duplicate()
    if operation == 'percentile':
        new_header.metadata['operation'] = '{} percentile'.format(percentile)
    else:
        new_header.metadata['operation'] = operation
    # build the matching collection type for the interval
    if interval == 'monthly':
        collection = MonthlyCollection(new_header, new_data, d_times)
    elif interval == 'daily':
        collection = DailyCollection(new_header, new_data, d_times)
    elif interval == 'monthlyperhour':
        collection = MonthlyPerHourCollection(new_header, new_data, d_times)
    collection._validated_a_period = True
    return collection
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.