idx
int64
0
251k
question
stringlengths
53
3.53k
target
stringlengths
5
1.23k
len_question
int64
20
893
len_target
int64
3
238
236,400
def getid ( self , ref ) : num = _C . Vgetid ( self . _hdf_inst . _id , ref ) _checkErr ( 'getid' , num , "bad arguments or last vgroup reached" ) return num
Obtain the reference number of the vgroup following the vgroup with the given reference number .
55
19
236,401
def insert ( self , inst ) : if isinstance ( inst , VD ) : id = inst . _id elif isinstance ( inst , VG ) : id = inst . _id else : raise HDF4Error ( "insrt: bad argument" ) index = _C . Vinsert ( self . _id , id ) _checkErr ( 'insert' , index , "cannot insert in vgroup" ) return index
Insert a vdata or a vgroup in the vgroup .
93
13
236,402
def add ( self , tag , ref ) : n = _C . Vaddtagref ( self . _id , tag , ref ) _checkErr ( 'addtagref' , n , 'invalid arguments' ) return n
Add to the vgroup an object identified by its tag and reference number .
50
15
236,403
def delete ( self , tag , ref ) : _checkErr ( 'delete' , _C . Vdeletetagref ( self . _id , tag , ref ) , "error deleting member" )
Delete from the vgroup the member identified by its tag and reference number .
45
15
236,404
def tagref ( self , index ) : status , tag , ref = _C . Vgettagref ( self . _id , index ) _checkErr ( 'tagref' , status , "illegal arguments" ) return tag , ref
Get the tag and reference number of a vgroup member given the index number of that member .
51
19
236,405
def tagrefs ( self ) : n = self . _nmembers ret = [ ] if n : tags = _C . array_int32 ( n ) refs = _C . array_int32 ( n ) k = _C . Vgettagrefs ( self . _id , tags , refs , n ) _checkErr ( 'tagrefs' , k , "error getting tags and refs" ) for m in xrange ( k ) : ret . append ( ( tags [ m ] , refs [ m ] ) ) return ret
Get the tags and reference numbers of all the vgroup members .
120
13
236,406
def inqtagref ( self , tag , ref ) : return _C . Vinqtagref ( self . _id , tag , ref )
Determines if an object identified by its tag and reference number belongs to the vgroup .
31
19
236,407
def nrefs ( self , tag ) : n = _C . Vnrefs ( self . _id , tag ) _checkErr ( 'nrefs' , n , "bad arguments" ) return n
Determine the number of tags of a given type in a vgroup .
47
16
236,408
def attrinfo ( self ) : dic = { } for n in range ( self . _nattrs ) : att = self . attr ( n ) name , type , order , size = att . info ( ) dic [ name ] = ( type , order , att . get ( ) , size ) return dic
Return info about all the vgroup attributes .
70
9
236,409
def findattr ( self , name ) : try : att = self . attr ( name ) if att . _index is None : att = None except HDF4Error : att = None return att
Search the vgroup for a given attribute .
42
9
236,410
def index ( self ) : self . _index = _C . SDfindattr ( self . _obj . _id , self . _name ) _checkErr ( 'find' , self . _index , 'illegal attribute name' ) return self . _index
Retrieve the attribute index number .
56
7
236,411
def end ( self ) : status = _C . SDend ( self . _id ) _checkErr ( 'end' , status , "cannot execute" ) self . _id = None
End access to the SD interface and close the HDF file .
42
13
236,412
def info ( self ) : status , n_datasets , n_file_attrs = _C . SDfileinfo ( self . _id ) _checkErr ( 'info' , status , "cannot execute" ) return n_datasets , n_file_attrs
Retrieve information about the SD interface .
63
8
236,413
def nametoindex ( self , sds_name ) : sds_idx = _C . SDnametoindex ( self . _id , sds_name ) _checkErr ( 'nametoindex' , sds_idx , 'non existent SDS' ) return sds_idx
Return the index number of a dataset given the dataset name .
69
12
236,414
def reftoindex ( self , sds_ref ) : sds_idx = _C . SDreftoindex ( self . _id , sds_ref ) _checkErr ( 'reftoindex' , sds_idx , 'illegal SDS ref number' ) return sds_idx
Returns the index number of a dataset given the dataset reference number .
71
13
236,415
def setfillmode ( self , fill_mode ) : if not fill_mode in [ SDC . FILL , SDC . NOFILL ] : raise HDF4Error ( "bad fill mode" ) old_mode = _C . SDsetfillmode ( self . _id , fill_mode ) _checkErr ( 'setfillmode' , old_mode , 'cannot execute' ) return old_mode
Set the fill mode for all the datasets in the file .
91
12
236,416
def select ( self , name_or_index ) : if isinstance ( name_or_index , type ( 1 ) ) : idx = name_or_index else : try : idx = self . nametoindex ( name_or_index ) except HDF4Error : raise HDF4Error ( "select: non-existent dataset" ) id = _C . SDselect ( self . _id , idx ) _checkErr ( 'select' , id , "cannot execute" ) return SDS ( self , id )
Locate a dataset .
118
5
236,417
def attributes ( self , full = 0 ) : # Get the number of global attributes. nsds , natts = self . info ( ) # Inquire each attribute res = { } for n in range ( natts ) : a = self . attr ( n ) name , aType , nVal = a . info ( ) if full : res [ name ] = ( a . get ( ) , a . index ( ) , aType , nVal ) else : res [ name ] = a . get ( ) return res
Return a dictionary describing every global attribute attached to the SD interface .
110
15
236,418
def datasets ( self ) : # Get number of datasets nDs = self . info ( ) [ 0 ] # Inquire each var res = { } for n in range ( nDs ) : # Get dataset info. v = self . select ( n ) vName , vRank , vLen , vType , vAtt = v . info ( ) if vRank < 2 : # need a sequence vLen = [ vLen ] # Get dimension info. dimNames = [ ] dimLengths = [ ] for dimNum in range ( vRank ) : d = v . dim ( dimNum ) dimNames . append ( d . info ( ) [ 0 ] ) dimLengths . append ( vLen [ dimNum ] ) res [ vName ] = ( tuple ( dimNames ) , tuple ( dimLengths ) , vType , n ) return res
Return a dictionary describing all the file datasets .
179
11
236,419
def endaccess ( self ) : status = _C . SDendaccess ( self . _id ) _checkErr ( 'endaccess' , status , "cannot execute" ) self . _id = None
Terminates access to the SDS .
45
8
236,420
def dim ( self , dim_index ) : id = _C . SDgetdimid ( self . _id , dim_index ) _checkErr ( 'dim' , id , 'invalid SDS identifier or dimension index' ) return SDim ( self , id , dim_index )
Get an SDim instance given a dimension index number .
63
11
236,421
def get ( self , start = None , count = None , stride = None ) : # Obtain SDS info. try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) if isinstance ( dim_sizes , type ( 1 ) ) : dim_sizes = [ dim_sizes ] except HDF4Error : raise HDF4Error ( 'get : cannot execute' ) # Validate args. if start is None : start = [ 0 ] * rank elif isinstance ( start , type ( 1 ) ) : start = [ start ] if count is None : count = dim_sizes if count [ 0 ] == 0 : count [ 0 ] = 1 elif isinstance ( count , type ( 1 ) ) : count = [ count ] if stride is None : stride = [ 1 ] * rank elif isinstance ( stride , type ( 1 ) ) : stride = [ stride ] if len ( start ) != rank or len ( count ) != rank or len ( stride ) != rank : raise HDF4Error ( 'get : start, stride or count ' 'do not match SDS rank' ) for n in range ( rank ) : if start [ n ] < 0 or start [ n ] + ( abs ( count [ n ] ) - 1 ) * stride [ n ] >= dim_sizes [ n ] : raise HDF4Error ( 'get arguments violate ' 'the size (%d) of dimension %d' % ( dim_sizes [ n ] , n ) ) if not data_type in SDC . equivNumericTypes : raise HDF4Error ( 'get cannot currrently deal with ' 'the SDS data type' ) return _C . _SDreaddata_0 ( self . _id , data_type , start , count , stride )
Read data from the dataset .
398
6
236,422
def set ( self , data , start = None , count = None , stride = None ) : # Obtain SDS info. try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) if isinstance ( dim_sizes , type ( 1 ) ) : dim_sizes = [ dim_sizes ] except HDF4Error : raise HDF4Error ( 'set : cannot execute' ) # Validate args. if start is None : start = [ 0 ] * rank elif isinstance ( start , type ( 1 ) ) : start = [ start ] if count is None : count = dim_sizes if count [ 0 ] == 0 : count [ 0 ] = 1 elif isinstance ( count , type ( 1 ) ) : count = [ count ] if stride is None : stride = [ 1 ] * rank elif isinstance ( stride , type ( 1 ) ) : stride = [ stride ] if len ( start ) != rank or len ( count ) != rank or len ( stride ) != rank : raise HDF4Error ( 'set : start, stride or count ' 'do not match SDS rank' ) unlimited = self . isrecord ( ) for n in range ( rank ) : ok = 1 if start [ n ] < 0 : ok = 0 elif n > 0 or not unlimited : if start [ n ] + ( abs ( count [ n ] ) - 1 ) * stride [ n ] >= dim_sizes [ n ] : ok = 0 if not ok : raise HDF4Error ( 'set arguments violate ' 'the size (%d) of dimension %d' % ( dim_sizes [ n ] , n ) ) # ??? Check support for UINT16 if not data_type in SDC . equivNumericTypes : raise HDF4Error ( 'set cannot currrently deal ' 'with the SDS data type' ) _C . _SDwritedata_0 ( self . _id , data_type , start , count , data , stride )
Write data to the dataset .
441
6
236,423
def info ( self ) : buf = _C . array_int32 ( _C . H4_MAX_VAR_DIMS ) status , sds_name , rank , data_type , n_attrs = _C . SDgetinfo ( self . _id , buf ) _checkErr ( 'info' , status , "cannot execute" ) dim_sizes = _array_to_ret ( buf , rank ) return sds_name , rank , dim_sizes , data_type , n_attrs
Retrieves information about the dataset .
117
8
236,424
def checkempty ( self ) : status , emptySDS = _C . SDcheckempty ( self . _id ) _checkErr ( 'checkempty' , status , 'invalid SDS identifier' ) return emptySDS
Determine whether the dataset is empty .
58
9
236,425
def ref ( self ) : sds_ref = _C . SDidtoref ( self . _id ) _checkErr ( 'idtoref' , sds_ref , 'illegal SDS identifier' ) return sds_ref
Get the reference number of the dataset .
54
8
236,426
def getcal ( self ) : status , cal , cal_error , offset , offset_err , data_type = _C . SDgetcal ( self . _id ) _checkErr ( 'getcal' , status , 'no calibration record' ) return cal , cal_error , offset , offset_err , data_type
Retrieve the SDS calibration coefficients .
71
8
236,427
def getdatastrs ( self ) : status , label , unit , format , coord_system = _C . SDgetdatastrs ( self . _id , 128 ) _checkErr ( 'getdatastrs' , status , 'cannot execute' ) return label , unit , format , coord_system
Retrieve the dataset standard string attributes .
67
8
236,428
def getrange ( self ) : # Obtain SDS data type. try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'getrange : invalid SDS identifier' ) n_values = 1 convert = _array_to_ret if data_type == SDC . CHAR8 : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) convert = _array_to_str elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf1 = _C . array_int8 ( n_values ) buf2 = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf1 = _C . array_int16 ( n_values ) buf2 = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf1 = _C . array_uint16 ( n_values ) buf2 = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf1 = _C . array_int32 ( n_values ) buf2 = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf1 = _C . array_uint32 ( n_values ) buf2 = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf1 = _C . array_float32 ( n_values ) buf2 = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf1 = _C . array_float64 ( n_values ) buf2 = _C . array_float64 ( n_values ) else : raise HDF4Error ( "getrange: SDS has an illegal or " "unsupported type %d" % data ) # Note: The C routine returns the max in buf1 and the min # in buf2. We swap the values returned by the Python # interface, since it is more natural to return # min first, then max. status = _C . SDgetrange ( self . _id , buf1 , buf2 ) _checkErr ( 'getrange' , status , 'range not set' ) return convert ( buf2 , n_values ) , convert ( buf1 , n_values )
Retrieve the dataset min and max values .
607
9
236,429
def setcal ( self , cal , cal_error , offset , offset_err , data_type ) : status = _C . SDsetcal ( self . _id , cal , cal_error , offset , offset_err , data_type ) _checkErr ( 'setcal' , status , 'cannot execute' )
Set the dataset calibration coefficients .
71
6
236,430
def setdatastrs ( self , label , unit , format , coord_sys ) : status = _C . SDsetdatastrs ( self . _id , label , unit , format , coord_sys ) _checkErr ( 'setdatastrs' , status , 'cannot execute' )
Set the dataset standard string type attributes .
65
8
236,431
def setfillvalue ( self , fill_val ) : # Obtain SDS data type. try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'setfillvalue : cannot execute' ) n_values = 1 # Fill value stands for 1 value. if data_type == SDC . CHAR8 : buf = _C . array_byte ( n_values ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "setfillvalue: SDS has an illegal or " "unsupported type %d" % data_type ) buf [ 0 ] = fill_val status = _C . SDsetfillvalue ( self . _id , buf ) _checkErr ( 'setfillvalue' , status , 'cannot execute' )
Set the dataset fill value .
405
6
236,432
def setrange ( self , min , max ) : # Obtain SDS data type. try : sds_name , rank , dim_sizes , data_type , n_attrs = self . info ( ) except HDF4Error : raise HDF4Error ( 'setrange : cannot execute' ) n_values = 1 if data_type == SDC . CHAR8 : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf1 = _C . array_byte ( n_values ) buf2 = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf1 = _C . array_int8 ( n_values ) buf2 = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf1 = _C . array_int16 ( n_values ) buf2 = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf1 = _C . array_uint16 ( n_values ) buf2 = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf1 = _C . array_int32 ( n_values ) buf2 = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf1 = _C . array_uint32 ( n_values ) buf2 = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf1 = _C . array_float32 ( n_values ) buf2 = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf1 = _C . array_float64 ( n_values ) buf2 = _C . array_float64 ( n_values ) else : raise HDF4Error ( "SDsetrange: SDS has an illegal or " "unsupported type %d" % data_type ) buf1 [ 0 ] = max buf2 [ 0 ] = min status = _C . SDsetrange ( self . _id , buf1 , buf2 ) _checkErr ( 'setrange' , status , 'cannot execute' )
Set the dataset min and max values .
545
8
236,433
def getcompress ( self ) : status , comp_type , value , v2 , v3 , v4 , v5 = _C . _SDgetcompress ( self . _id ) _checkErr ( 'getcompress' , status , 'no compression' ) if comp_type == SDC . COMP_NONE : return ( comp_type , ) elif comp_type == SDC . COMP_SZIP : return comp_type , value , v2 , v3 , v4 , v5 else : return comp_type , value
Retrieves info about dataset compression type and mode .
121
11
236,434
def setcompress ( self , comp_type , value = 0 , v2 = 0 ) : status = _C . _SDsetcompress ( self . _id , comp_type , value , v2 ) _checkErr ( 'setcompress' , status , 'cannot execute' )
Compresses the dataset using a specified compression method .
65
10
236,435
def setexternalfile ( self , filename , offset = 0 ) : status = _C . SDsetexternalfile ( self . _id , filename , offset ) _checkErr ( 'setexternalfile' , status , 'execution error' )
Store the dataset data in an external file .
58
9
236,436
def dimensions ( self , full = 0 ) : # Get the number of dimensions and their lengths. nDims , dimLen = self . info ( ) [ 1 : 3 ] if isinstance ( dimLen , int ) : # need a sequence dimLen = [ dimLen ] # Check if the dataset is appendable. unlim = self . isrecord ( ) # Inquire each dimension res = { } for n in range ( nDims ) : d = self . dim ( n ) # The length reported by info() is 0 for an unlimited dimension. # Rather use the lengths reported by SDS.info() name , k , scaleType , nAtt = d . info ( ) length = dimLen [ n ] if full : res [ name ] = ( length , n , unlim and n == 0 , scaleType , nAtt ) else : res [ name ] = length return res
Return a dictionary describing every dataset dimension .
187
10
236,437
def info ( self ) : status , dim_name , dim_size , data_type , n_attrs = _C . SDdiminfo ( self . _id ) _checkErr ( 'info' , status , 'cannot execute' ) return dim_name , dim_size , data_type , n_attrs
Return info about the dimension instance .
71
7
236,438
def setname ( self , dim_name ) : status = _C . SDsetdimname ( self . _id , dim_name ) _checkErr ( 'setname' , status , 'cannot execute' )
Set the dimension name .
48
5
236,439
def getscale ( self ) : # Get dimension info. If data_type is 0, no scale have been set # on the dimension. status , dim_name , dim_size , data_type , n_attrs = _C . SDdiminfo ( self . _id ) _checkErr ( 'getscale' , status , 'cannot execute' ) if data_type == 0 : raise HDF4Error ( "no scale set on that dimension" ) # dim_size is 0 for an unlimited dimension. The actual length is # obtained through SDgetinfo. if dim_size == 0 : dim_size = self . _sds . info ( ) [ 2 ] [ self . _index ] # Get scale values. if data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( dim_size ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( dim_size ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( dim_size ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( dim_size ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( dim_size ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( dim_size ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( dim_size ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( dim_size ) else : raise HDF4Error ( "getscale: dimension has an " "illegal or unsupported type %d" % data_type ) status = _C . SDgetdimscale ( self . _id , buf ) _checkErr ( 'getscale' , status , 'cannot execute' ) return _array_to_ret ( buf , dim_size )
Obtain the scale values along a dimension .
456
9
236,440
def setscale ( self , data_type , scale ) : try : n_values = len ( scale ) except : n_values = 1 # Validate args info = self . _sds . info ( ) if info [ 1 ] == 1 : dim_size = info [ 2 ] else : dim_size = info [ 2 ] [ self . _index ] if n_values != dim_size : raise HDF4Error ( 'number of scale values (%d) does not match ' 'dimension size (%d)' % ( n_values , dim_size ) ) if data_type == SDC . CHAR8 : buf = _C . array_byte ( n_values ) # Allow a string as the scale argument. # Becomes a noop if already a list. scale = list ( scale ) for n in range ( n_values ) : scale [ n ] = ord ( scale [ n ] ) elif data_type in [ SDC . UCHAR8 , SDC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == SDC . INT8 : buf = _C . array_int8 ( n_values ) elif data_type == SDC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == SDC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == SDC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == SDC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == SDC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == SDC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "setscale: illegal or usupported data_type" ) if n_values == 1 : buf [ 0 ] = scale else : for n in range ( n_values ) : buf [ n ] = scale [ n ] status = _C . SDsetdimscale ( self . _id , n_values , data_type , buf ) _checkErr ( 'setscale' , status , 'cannot execute' )
Initialize the scale values along the dimension .
516
9
236,441
def getstrs ( self ) : status , label , unit , format = _C . SDgetdimstrs ( self . _id , 128 ) _checkErr ( 'getstrs' , status , 'cannot execute' ) return label , unit , format
Retrieve the dimension standard string attributes .
57
8
236,442
def setstrs ( self , label , unit , format ) : status = _C . SDsetdimstrs ( self . _id , label , unit , format ) _checkErr ( 'setstrs' , status , 'cannot execute' )
Set the dimension standard string attributes .
55
7
236,443
def attach ( self , num_name , write = 0 ) : mode = write and 'w' or 'r' if isinstance ( num_name , str ) : num = self . find ( num_name ) else : num = num_name vd = _C . VSattach ( self . _hdf_inst . _id , num , mode ) if vd < 0 : _checkErr ( 'attach' , vd , 'cannot attach vdata' ) return VD ( self , vd )
Locate an existing vdata or create a new vdata in the HDF file returning a VD instance .
112
23
236,444
def create ( self , name , fields ) : try : # Create new vdata (-1), open in write mode (1) vd = self . attach ( - 1 , 1 ) # Set vdata name vd . _name = name # Define fields allNames = [ ] for name , type , order in fields : vd . fdefine ( name , type , order ) allNames . append ( name ) # Allocate fields to the vdata vd . setfields ( * allNames ) return vd except HDF4Error as msg : raise HDF4Error ( "error creating vdata (%s)" % msg )
Create a new vdata setting its name and allocating its fields .
134
14
236,445
def next ( self , vRef ) : num = _C . VSgetid ( self . _hdf_inst . _id , vRef ) _checkErr ( 'next' , num , 'cannot get next vdata' ) return num
Get the reference number of the vdata following a given vdata .
54
14
236,446
def vdatainfo ( self , listAttr = 0 ) : lst = [ ] ref = - 1 # start at beginning while True : try : nxtRef = self . next ( ref ) except HDF4Error : # no vdata left break # Attach the vdata and check for an "attribute" vdata. ref = nxtRef vdObj = self . attach ( ref ) if listAttr or not vdObj . _isattr : # Append a list of vdata properties. lst . append ( ( vdObj . _name , vdObj . _class , vdObj . _refnum , vdObj . _nrecs , vdObj . _nfields , vdObj . _nattrs , vdObj . _recsize , vdObj . _tag , vdObj . _interlace ) ) vdObj . detach ( ) return lst
Return info about all the file vdatas .
199
10
236,447
def storedata ( self , fieldName , values , data_type , vName , vClass ) : # See if the field is multi-valued. nrecs = len ( values ) if type ( values [ 0 ] ) in [ list , tuple ] : order = len ( values [ 0 ] ) # Replace input list with a flattened list. newValues = [ ] for el in values : for e in el : newValues . append ( e ) values = newValues else : order = 1 n_values = nrecs * order if data_type == HC . CHAR8 : buf = _C . array_byte ( n_values ) # Allow values to be passed as a string. # Noop if a list is passed. values = list ( values ) for n in range ( n_values ) : values [ n ] = ord ( values [ n ] ) elif data_type in [ HC . UCHAR8 , HC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == HC . INT8 : # SWIG refuses negative values here. We found that if we # pass them as byte values, it will work. buf = _C . array_int8 ( n_values ) values = list ( values ) for n in range ( n_values ) : v = values [ n ] if v >= 0 : v &= 0x7f else : v = abs ( v ) & 0x7f if v : v = 256 - v else : v = 128 # -128 in 2s complement values [ n ] = v elif data_type == HC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == HC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == HC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == HC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == HC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == HC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "storedata: illegal or unimplemented data_type" ) for n in range ( n_values ) : buf [ n ] = values [ n ] if order == 1 : vd = _C . VHstoredata ( self . _hdf_inst . _id , fieldName , buf , nrecs , data_type , vName , vClass ) else : vd = _C . VHstoredatam ( self . _hdf_inst . _id , fieldName , buf , nrecs , data_type , vName , vClass , order ) _checkErr ( 'storedata' , vd , 'cannot create vdata' ) return vd
Create and initialize a single field vdata returning the vdata reference number .
655
15
236,448
def field ( self , name_index ) : # Transform a name to an index number if isinstance ( name_index , str ) : status , index = _C . VSfindex ( self . _id , name_index ) _checkErr ( 'field' , status , "illegal field name: %s" % name_index ) else : n = _C . VFnfields ( self . _id ) _checkErr ( 'field' , n , 'cannot execute' ) index = name_index if index >= n : raise HDF4Error ( "field: illegal index number" ) return VDField ( self , index )
Get a VDField instance representing a field of the vdata .
140
14
236,449
def seek ( self , recIndex ) : if recIndex > self . _nrecs - 1 : if recIndex == self . _nrecs : return self . seekend ( ) else : raise HDF4Error ( "attempt to seek past last record" ) n = _C . VSseek ( self . _id , recIndex ) _checkErr ( 'seek' , n , 'cannot seek' ) self . _offset = n return n
Seek to the beginning of the record identified by its record index . A succeeding read will load this record in memory .
99
24
236,450
def inquire ( self ) : status , nRecs , interlace , fldNames , size , vName = _C . VSinquire ( self . _id ) _checkErr ( 'inquire' , status , "cannot query vdata info" ) return nRecs , interlace , fldNames . split ( ',' ) , size , vName
Retrieve info about the vdata .
79
8
236,451
def fieldinfo ( self ) : lst = [ ] for n in range ( self . _nfields ) : fld = self . field ( n ) lst . append ( ( fld . _name , fld . _type , fld . _order , fld . _nattrs , fld . _index , fld . _esize , fld . _isize ) ) return lst
Retrieve info about all vdata fields .
89
9
236,452
def sizeof ( self , fields ) : if type ( fields ) in [ tuple , list ] : str = ',' . join ( fields ) else : str = fields n = _C . VSsizeof ( self . _id , str ) _checkErr ( 'sizeof' , n , "cannot retrieve field sizes" ) return n
Retrieve the size in bytes of the given fields .
72
11
236,453
def fexist ( self , fields ) : if type ( fields ) in [ tuple , list ] : str = ',' . join ( fields ) else : str = fields ret = _C . VSfexist ( self . _id , str ) if ret < 0 : return 0 else : return 1
Check if a vdata contains a given set of fields .
63
12
236,454
def find ( self , name ) : try : att = self . attr ( name ) if att . _index is None : att = None except HDF4Error : att = None return att
Search the field for a given attribute .
41
8
236,455
def set ( self , data_type , values ) : try : n_values = len ( values ) except : values = [ values ] n_values = 1 if data_type == HC . CHAR8 : buf = _C . array_byte ( n_values ) # Allow values to be passed as a string. # Noop if a list is passed. values = list ( values ) for n in range ( n_values ) : if not isinstance ( values [ n ] , int ) : values [ n ] = ord ( values [ n ] ) elif data_type in [ HC . UCHAR8 , HC . UINT8 ] : buf = _C . array_byte ( n_values ) elif data_type == HC . INT8 : # SWIG refuses negative values here. We found that if we # pass them as byte values, it will work. buf = _C . array_int8 ( n_values ) values = list ( values ) for n in range ( n_values ) : v = values [ n ] if v >= 0 : v &= 0x7f else : v = abs ( v ) & 0x7f if v : v = 256 - v else : v = 128 # -128 in 2s complement values [ n ] = v elif data_type == HC . INT16 : buf = _C . array_int16 ( n_values ) elif data_type == HC . UINT16 : buf = _C . array_uint16 ( n_values ) elif data_type == HC . INT32 : buf = _C . array_int32 ( n_values ) elif data_type == HC . UINT32 : buf = _C . array_uint32 ( n_values ) elif data_type == HC . FLOAT32 : buf = _C . array_float32 ( n_values ) elif data_type == HC . FLOAT64 : buf = _C . array_float64 ( n_values ) else : raise HDF4Error ( "set: illegal or unimplemented data_type" ) for n in range ( n_values ) : buf [ n ] = values [ n ] status = _C . VSsetattr ( self . _vd_inst . _id , self . _fIndex , self . _name , data_type , n_values , buf ) _checkErr ( 'attr' , status , 'cannot execute' ) # Update the attribute index self . _index = _C . VSfindattr ( self . _vd_inst . _id , self . _fIndex , self . _name ) if self . _index < 0 : raise HDF4Error ( "set: error retrieving attribute index" )
Set the attribute value .
586
5
236,456
def getlibversion ( ) : status , major_v , minor_v , release , info = _C . Hgetlibversion ( ) _checkErr ( 'getlibversion' , status , "cannot get lib version" ) return major_v , minor_v , release , info
Get the library version info .
63
6
236,457
def getfileversion ( self ) : status , major_v , minor_v , release , info = _C . Hgetfileversion ( self . _id ) _checkErr ( 'getfileversion' , status , "cannot get file version" ) return major_v , minor_v , release , info
Get file version info .
68
5
236,458
def colorize ( lead , num , color ) : if num != 0 and ANSIBLE_COLOR and color is not None : return "%s%s%-15s" % ( stringc ( lead , color ) , stringc ( "=" , color ) , stringc ( str ( num ) , color ) ) else : return "%s=%-4s" % ( lead , str ( num ) )
Print lead = num in color
85
6
236,459
def timeline ( self , request , drip_id , into_past , into_future ) : from django . shortcuts import render , get_object_or_404 drip = get_object_or_404 ( Drip , id = drip_id ) shifted_drips = [ ] seen_users = set ( ) for shifted_drip in drip . drip . walk ( into_past = int ( into_past ) , into_future = int ( into_future ) + 1 ) : shifted_drip . prune ( ) shifted_drips . append ( { 'drip' : shifted_drip , 'qs' : shifted_drip . get_queryset ( ) . exclude ( id__in = seen_users ) } ) seen_users . update ( shifted_drip . get_queryset ( ) . values_list ( 'id' , flat = True ) ) return render ( request , 'drip/timeline.html' , locals ( ) )
Return a list of people who should get emails .
212
10
236,460
def walk ( self , into_past = 0 , into_future = 0 ) : walked_range = [ ] for shift in range ( - into_past , into_future ) : kwargs = dict ( drip_model = self . drip_model , name = self . name , now_shift_kwargs = { 'days' : shift } ) walked_range . append ( self . __class__ ( * * kwargs ) ) return walked_range
Walk over a date range and create new instances of self with new ranges .
99
15
236,461
def run ( self ) : if not self . drip_model . enabled : return None self . prune ( ) count = self . send ( ) return count
Get the queryset prune sent people and send it .
33
13
236,462
def prune ( self ) : target_user_ids = self . get_queryset ( ) . values_list ( 'id' , flat = True ) exclude_user_ids = SentDrip . objects . filter ( date__lt = conditional_now ( ) , drip = self . drip_model , user__id__in = target_user_ids ) . values_list ( 'user_id' , flat = True ) self . _queryset = self . get_queryset ( ) . exclude ( id__in = exclude_user_ids )
Do an exclude for all Users who have a SentDrip already .
123
14
236,463
def send ( self ) : if not self . from_email : self . from_email = getattr ( settings , 'DRIP_FROM_EMAIL' , settings . DEFAULT_FROM_EMAIL ) MessageClass = message_class_for ( self . drip_model . message_class ) count = 0 for user in self . get_queryset ( ) : message_instance = MessageClass ( self , user ) try : result = message_instance . message . send ( ) if result : SentDrip . objects . create ( drip = self . drip_model , user = user , from_email = self . from_email , from_email_name = self . from_email_name , subject = message_instance . subject , body = message_instance . body ) count += 1 except Exception as e : logging . error ( "Failed to send drip %s to user %s: %s" % ( self . drip_model . id , user , e ) ) return count
Send the message to each user on the queryset .
214
12
236,464
def angle ( self , other ) : return math . acos ( self . dot ( other ) / ( self . magnitude ( ) * other . magnitude ( ) ) )
Return the angle to the vector other
35
7
236,465
def project ( self , other ) : n = other . normalized ( ) return self . dot ( n ) * n
Return one vector projected on the vector other
24
8
236,466
def rotate_around(self, axis, theta):
    """Return this vector rotated around *axis* through angle *theta*.

    Adapted from equations published by Glenn Murray:
    http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
    """
    x, y, z = self.x, self.y, self.z
    u, v, w = axis.x, axis.y, axis.z
    # Common sub-expressions: squared axis length and trig factors.
    axis_len_sq = u ** 2 + v ** 2 + w ** 2
    axis_len = math.sqrt(axis_len_sq)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta) / axis_len
    dot_t = (u * x + v * y + w * z) * (1 - cos_t) / axis_len_sq
    return Vector3(
        u * dot_t + x * cos_t + (-w * y + v * z) * sin_t,
        v * dot_t + y * cos_t + (w * x - u * z) * sin_t,
        w * dot_t + z * cos_t + (-v * x + u * y) * sin_t,
    )
Return the vector rotated around axis through angle theta .
247
11
236,467
def preparedir(target_dir, remove_content=True):
    """Prepare a folder for analysis.

    Creates the folder when missing; optionally empties it when present.
    Returns True on success, False if the folder could not be created.
    """
    if os.path.isdir(target_dir):
        if remove_content:
            nukedir(target_dir, False)
        return True
    try:
        os.makedirs(target_dir)
    except Exception as e:
        print("Failed to create folder: %s\n%s" % (target_dir, e))
        return False
    return True
Prepare a folder for analysis .
93
7
236,468
def nukedir(target_dir, rmdir=False):
    """Delete all the files inside target_dir.

    When ``rmdir`` is True the directory itself is removed as well.
    Failures are printed but never raised.
    """
    d = os.path.normpath(target_dir)
    if not os.path.isdir(d):
        return
    for f in os.listdir(d):
        if f == '.' or f == '..':
            continue
        path = os.path.join(d, f)
        if os.path.isdir(path):
            # Recurse with rmdir=True so emptied sub-folders are removed;
            # the original left them in place, which made the os.rmdir(d)
            # below fail for any directory containing subdirectories.
            nukedir(path, True)
        else:
            try:
                os.remove(path)
            except Exception:
                print("Failed to remove %s" % path)
    if rmdir:
        try:
            os.rmdir(d)
        except Exception:
            print("Failed to remove %s" % d)
Delete all the files inside target_dir .
155
9
236,469
def write_to_file_by_name(folder, fname, data, mkdir=False):
    """Write a string of data to file by filename and folder.

    Returns the full path of the written file. Raises ValueError when the
    folder is missing and cannot be created, IOError when writing fails.
    """
    if not os.path.isdir(folder):
        if mkdir:
            preparedir(folder)
        else:
            created = preparedir(folder, False)
            if not created:
                raise ValueError("Failed to find %s." % folder)
    file_path = os.path.join(folder, fname)
    # 'w' (text mode): the original referenced an undefined `writemode`
    # name, which raised NameError on every call.
    with open(file_path, 'w') as outf:
        try:
            outf.write(str(data))
            return file_path
        except Exception as e:
            raise IOError("Failed to write %s to file:\n\t%s" % (fname, str(e)))
Write a string of data to file by filename and folder .
157
12
236,470
def copy_files_to_folder(files, target_folder, overwrite=True):
    """Copy a list of files to a new target folder.

    Returns the list of file paths inside ``target_folder``. Files already
    at their target location, or whose target exists with overwrite=False,
    are skipped.
    """
    if not files:
        return []
    for f in files:
        target = os.path.join(target_folder, os.path.split(f)[-1])
        if target == f:
            # Source and destination are the same file: nothing to copy.
            # (The original returned a bare string here, aborting the batch.)
            continue
        if os.path.exists(target):
            if not overwrite:
                continue
            # remove the file before copying
            try:
                os.remove(target)
            except Exception:
                raise IOError("Failed to remove %s" % f)
            else:
                shutil.copy(f, target)
        else:
            print('Copying %s to %s' % (os.path.split(f)[-1],
                                        os.path.normpath(target_folder)))
            shutil.copy(f, target)
    return [os.path.join(target_folder, os.path.split(f)[-1]) for f in files]
Copy a list of files to a new target folder .
211
11
236,471
def bat_to_sh(file_path):
    """Convert a honeybee .bat file to a .sh file.

    Returns the path of the new shell script, which is made executable.
    """
    sh_file = file_path[:-4] + '.sh'
    # Text mode: the original opened 'rb'/'wb' but wrote and compared
    # str objects, which raises TypeError under Python 3.
    with open(file_path, 'r') as inf, open(sh_file, 'w') as outf:
        outf.write('#!/usr/bin/env bash\n\n')
        # pass the path lines, etc to get to the commands
        for line in inf:
            if line.strip():
                continue
            else:
                break
        for line in inf:
            if line.startswith('echo'):
                continue
            modified_line = line.replace('c:\\radiance\\bin\\', '').replace('\\', '/')
            outf.write(modified_line)
    print('bash file is created at:\n\t%s' % sh_file)
    # Heroku - Make command.sh executable
    st = os.stat(sh_file)
    os.chmod(sh_file, st.st_mode | 0o111)
    return sh_file
Convert honeybee . bat file to . sh file .
224
12
236,472
def _download_py2(link, path, __hdr__):
    """Download a file from a link in Python 2.

    Sends the request with the given headers via urllib2 and streams
    the response body to ``path``.
    """
    try:
        request = urllib2.Request(link, headers=__hdr__)
        response = urllib2.urlopen(request)
    except Exception as e:
        raise Exception(' Download failed with the error:\n{}'.format(e))
    with open(path, 'wb') as outf:
        for chunk in response:
            outf.write(chunk)
    response.close()
Download a file from a link in Python 2 .
104
10
236,473
def _download_py3(link, path, __hdr__):
    """Download a file from a link in Python 3.

    Sends the request with the given headers via urllib.request and
    streams the response body to ``path``.
    """
    try:
        request = urllib.request.Request(link, headers=__hdr__)
        response = urllib.request.urlopen(request)
    except Exception as e:
        raise Exception(' Download failed with the error:\n{}'.format(e))
    with open(path, 'wb') as outf:
        for chunk in response:
            outf.write(chunk)
    response.close()
Download a file from a link in Python 3 .
106
10
236,474
def download_file_by_name(url, target_folder, file_name, mkdir=False):
    """Download a file to a directory.

    Args:
        url: link to the file to download.
        target_folder: folder the file is saved into.
        file_name: name of the saved file.
        mkdir: create the folder when missing (default: False).
    """
    # headers to "spoof" the download as coming from a browser (needed for E+ site)
    __hdr__ = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
                      '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
        'Accept': 'text/html,application/xhtml+xml,'
                  'application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'
    }
    # create the target directory.
    if not os.path.isdir(target_folder):
        if mkdir:
            preparedir(target_folder)
        else:
            created = preparedir(target_folder, False)
            if not created:
                raise ValueError("Failed to find %s." % target_folder)
    file_path = os.path.join(target_folder, file_name)
    # dispatch on the running interpreter version
    if sys.version_info < (3, 0):
        _download_py2(url, file_path, __hdr__)
    else:
        _download_py3(url, file_path, __hdr__)
Download a file to a directory .
357
7
236,475
def unzip_file(source_file, dest_dir=None, mkdir=False):
    """Unzip a compressed file.

    Args:
        source_file: full path to the zip archive.
        dest_dir: target folder (default: the folder of source_file).
        mkdir: create dest_dir when missing (default: False).
    """
    # set default dest_dir and create it if need be.
    if dest_dir is None:
        dest_dir, fname = os.path.split(source_file)
    elif not os.path.isdir(dest_dir):
        if mkdir:
            preparedir(dest_dir)
        else:
            created = preparedir(dest_dir, False)
            if not created:
                raise ValueError("Failed to find %s." % dest_dir)
    # extract files to destination
    with zipfile.ZipFile(source_file) as zf:
        for member in zf.infolist():
            # Build the target folder per member instead of mutating
            # dest_dir: the original appended path parts to dest_dir
            # itself, so every member nested inside the previous one.
            member_dir = dest_dir
            words = member.filename.split('\\')
            for word in words[:-1]:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in (os.curdir, os.pardir, ''):
                    continue
                member_dir = os.path.join(member_dir, word)
            zf.extract(member, member_dir)
Unzip a compressed file .
243
6
236,476
def csv_to_matrix(csv_file_path):
    """Load a CSV file into a Python matrix of strings.

    Note that the trailing newline of each row is kept on its last field.
    """
    with open(csv_file_path) as csv_data_file:
        return [row.split(',') for row in csv_data_file]
Load a CSV file into a Python matrix of strings .
66
11
236,477
def csv_to_num_matrix(csv_file_path):
    """Load a CSV file consisting only of numbers into a matrix of floats."""
    with open(csv_file_path) as csv_data_file:
        return [[float(val) for val in row.split(',')]
                for row in csv_data_file]
Load a CSV file consisting only of numbers into a Python matrix of floats .
77
15
236,478
def monthly_cooling_design_days_050(self):
    """A list of 12 objects representing monthly 5.0% cooling design days.

    Returns an empty list when the monthly data was not found in the file.
    """
    if self.monthly_found is False or self._monthly_db_50 == [] \
            or self._monthly_wb_50 == []:
        return []
    db_conds = [DryBulbCondition(db, rng) for db, rng in
                zip(self._monthly_db_50, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_50]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '5% Cooling Design Day for {}'.format(self._months[i]),
        'SummerDesignDay', self._location,
        db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
        for i in xrange(12)]
A list of 12 objects representing monthly 5 . 0% cooling design days .
255
15
236,479
def monthly_cooling_design_days_100(self):
    """A list of 12 objects representing monthly 10.0% cooling design days.

    Returns an empty list when the monthly data was not found in the file.
    """
    if self.monthly_found is False or self._monthly_db_100 == [] \
            or self._monthly_wb_100 == []:
        return []
    db_conds = [DryBulbCondition(db, rng) for db, rng in
                zip(self._monthly_db_100, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_100]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '10% Cooling Design Day for {}'.format(self._months[i]),
        'SummerDesignDay', self._location,
        db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
        for i in xrange(12)]
A list of 12 objects representing monthly 10 . 0% cooling design days .
255
15
236,480
def monthly_cooling_design_days_020(self):
    """A list of 12 objects representing monthly 2.0% cooling design days.

    Returns an empty list when the monthly data was not found in the file.
    """
    if self.monthly_found is False or self._monthly_db_20 == [] \
            or self._monthly_wb_20 == []:
        return []
    db_conds = [DryBulbCondition(db, rng) for db, rng in
                zip(self._monthly_db_20, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_20]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '2% Cooling Design Day for {}'.format(self._months[i]),
        'SummerDesignDay', self._location,
        db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
        for i in xrange(12)]
A list of 12 objects representing monthly 2 . 0% cooling design days .
255
15
236,481
def monthly_cooling_design_days_004(self):
    """A list of 12 objects representing monthly 0.4% cooling design days.

    Returns an empty list when the monthly data was not found in the file.
    """
    if self.monthly_found is False or self._monthly_db_04 == [] \
            or self._monthly_wb_04 == []:
        return []
    db_conds = [DryBulbCondition(db, rng) for db, rng in
                zip(self._monthly_db_04, self._monthly_db_range_50)]
    hu_conds = [HumidityCondition('Wetbulb', wb, self._stand_press_at_elev)
                for wb in self._monthly_wb_04]
    ws_conds = self.monthly_wind_conditions
    sky_conds = self.monthly_clear_sky_conditions
    return [DesignDay(
        '0.4% Cooling Design Day for {}'.format(self._months[i]),
        'SummerDesignDay', self._location,
        db_conds[i], hu_conds[i], ws_conds[i], sky_conds[i])
        for i in xrange(12)]
A list of 12 objects representing monthly 0 . 4% cooling design days .
257
15
236,482
def monthly_wind_conditions(self):
    """A list of 12 monthly wind conditions used on the design days."""
    return [WindCondition(speed, direction) for speed, direction in
            zip(self._monthly_wind, self.monthly_wind_dirs)]
A list of 12 monthly wind conditions that are used on the design days .
45
15
236,483
def monthly_wind_dirs(self):
    """A list of prevailing wind directions for each month.

    Picks, per month, the direction with the highest frequency count.
    """
    per_month = zip(*self._monthly_wind_dirs)
    return [self._wind_dirs[counts.index(max(counts))]
            for counts in per_month]
A list of prevailing wind directions for each month .
54
10
236,484
def monthly_clear_sky_conditions(self):
    """A list of 12 monthly clear sky conditions used on the design days.

    Falls back to the original ASHRAE clear sky when monthly tau values
    are not available.
    """
    # `== []` instead of `is []`: identity comparison against a fresh
    # list literal is always False, so the fallback branch never ran.
    if self._monthly_tau_diffuse == [] or self._monthly_tau_beam == []:
        return [OriginalClearSkyCondition(i, 21) for i in xrange(1, 13)]
    return [RevisedClearSkyCondition(i, 21, x, y) for i, x, y in zip(
        list(xrange(1, 13)), self._monthly_tau_beam, self._monthly_tau_diffuse)]
A list of 12 monthly clear sky conditions that are used on the design days .
124
16
236,485
def to_json(self):
    """Convert the STAT object to a dictionary."""
    def jsonify_dict(base_dict):
        # Serialize a dict whose values are objects (or lists of
        # objects) that implement to_json.
        new_dict = {}
        for key, val in base_dict.items():
            if isinstance(val, list):
                new_dict[key] = [v.to_json() for v in val]
            else:
                new_dict[key] = val.to_json()
        return new_dict
    return {
        'location': self.location.to_json(),
        'ashrae_climate_zone': self.ashrae_climate_zone,
        'koppen_climate_zone': self.koppen_climate_zone,
        'extreme_cold_week': self.extreme_cold_week.to_json()
        if self.extreme_cold_week else None,
        # Bug fix: the original gated this on extreme_cold_week, which
        # raised AttributeError when only the hot week was present and
        # dropped the hot week when only the cold week was missing.
        'extreme_hot_week': self.extreme_hot_week.to_json()
        if self.extreme_hot_week else None,
        'typical_weeks': jsonify_dict(self._typical_weeks),
        'heating_dict': self._winter_des_day_dict,
        'cooling_dict': self._summer_des_day_dict,
        "monthly_db_50": self._monthly_db_50,
        "monthly_wb_50": self._monthly_wb_50,
        "monthly_db_range_50": self._monthly_db_range_50,
        "monthly_wb_range_50": self._monthly_wb_range_50,
        "monthly_db_100": self._monthly_db_100,
        "monthly_wb_100": self._monthly_wb_100,
        "monthly_db_20": self._monthly_db_20,
        "monthly_wb_20": self._monthly_wb_20,
        "monthly_db_04": self._monthly_db_04,
        "monthly_wb_04": self._monthly_wb_04,
        "monthly_wind": self._monthly_wind,
        "monthly_wind_dirs": self._monthly_wind_dirs,
        "standard_pressure_at_elev": self.standard_pressure_at_elev,
        "monthly_tau_beam": self.monthly_tau_beam,
        "monthly_tau_diffuse": self.monthly_tau_diffuse
    }
Convert the STAT object to a dictionary .
563
9
236,486
def from_json(cls, data):
    """Create a data type from a dictionary.

    Args:
        data: dict with at least 'name' and 'data_type' keys; GenericType
            additionally requires 'base_unit'.
    """
    assert 'name' in data, 'Required keyword "name" is missing!'
    assert 'data_type' in data, 'Required keyword "data_type" is missing!'
    if cls._type_enumeration is None:
        cls._type_enumeration = _DataTypeEnumeration(import_modules=False)
    d_type = data['data_type']
    if d_type == 'GenericType':
        assert 'base_unit' in data, \
            'Keyword "base_unit" is missing and is required for GenericType.'
        return cls._type_enumeration._GENERICTYPE(data['name'], data['base_unit'])
    if d_type in cls._type_enumeration._TYPES:
        clss = cls._type_enumeration._TYPES[d_type]
        if d_type == data['name'].title().replace(' ', ''):
            # name matches the class name exactly: use the stock instance
            return clss()
        instance = clss()
        instance._name = data['name']
        return instance
    raise ValueError('Data Type {} could not be recognized'.format(d_type))
Create a data type from a dictionary .
293
8
236,487
def is_unit_acceptable(self, unit, raise_exception=True):
    """Check if a certain unit is acceptable for the data type.

    Args:
        unit: the unit string to check.
        raise_exception: raise ValueError on failure instead of
            returning False (default: True).
    """
    acceptable = unit in self.units
    if acceptable or not raise_exception:
        return acceptable
    raise ValueError(
        '{0} is not an acceptable unit type for {1}. '
        'Choose from the following: {2}'.format(
            unit, self.__class__.__name__, self.units))
Check if a certain unit is acceptable for the data type .
98
12
236,488
def _is_numeric ( self , values ) : if len ( values ) > 0 : assert isinstance ( values [ 0 ] , ( float , int ) ) , "values must be numbers to perform math operations. Got {}" . format ( type ( values [ 0 ] ) ) return True
Check to be sure values are numbers before doing numerical operations .
62
12
236,489
def _to_unit_base ( self , base_unit , values , unit , from_unit ) : self . _is_numeric ( values ) namespace = { 'self' : self , 'values' : values } if not from_unit == base_unit : self . is_unit_acceptable ( from_unit , True ) statement = '[self._{}_to_{}(val) for val in values]' . format ( self . _clean ( from_unit ) , self . _clean ( base_unit ) ) values = eval ( statement , namespace ) namespace [ 'values' ] = values if not unit == base_unit : self . is_unit_acceptable ( unit , True ) statement = '[self._{}_to_{}(val) for val in values]' . format ( self . _clean ( base_unit ) , self . _clean ( unit ) ) values = eval ( statement , namespace ) return values
Return values in a given unit given the input from_unit .
200
13
236,490
def name(self):
    """The data type name."""
    if self._name is not None:
        return self._name
    # Derive a display name from the class name, e.g.
    # "DryBulbTemperature" -> "Dry Bulb Temperature".
    return re.sub(r"(?<=\w)([A-Z])", r" \1", self.__class__.__name__)
The data type name .
57
5
236,491
def from_json(cls, data):
    """Create a header from a dictionary.

    Args:
        data: dict with a required 'data_type' key and optional
            'unit', 'analysis_period' and 'metadata' keys.
    """
    assert 'data_type' in data, 'Required keyword "data_type" is missing!'
    # Use .get() for the optional keys instead of writing default None
    # values back into the caller's dictionary, which the original
    # mutated as a side effect.
    data_type = DataTypeBase.from_json(data['data_type'])
    ap = AnalysisPeriod.from_json(data.get('analysis_period'))
    return cls(data_type, data.get('unit'), ap, data.get('metadata'))
Create a header from a dictionary .
139
7
236,492
def duplicate(self):
    """Return a copy of the header."""
    if self.analysis_period:
        a_per = self.analysis_period.duplicate()
    else:
        a_per = None
    return self.__class__(
        self.data_type, self.unit, a_per, deepcopy(self.metadata))
Return a copy of the header .
56
7
236,493
def to_tuple(self):
    """Return the Ladybug header as a tuple of its four fields."""
    return (
        self.data_type,
        self.unit,
        self.analysis_period,
        self.metadata,
    )
Return the Ladybug header as a tuple .
31
8
236,494
def to_json(self):
    """Return the header as a dictionary."""
    if self.analysis_period:
        a_per = self.analysis_period.to_json()
    else:
        a_per = None
    return {
        'data_type': self.data_type.to_json(),
        'unit': self.unit,
        'analysis_period': a_per,
        'metadata': self.metadata,
    }
Return a header as a dictionary .
77
7
236,495
def ashrae_clear_sky(altitudes, month, sky_clearness=1):
    """Calculate solar flux for an original ASHRAE Clear Sky.

    Args:
        altitudes: list of solar altitudes in degrees.
        month: month of the year (1-12).
        sky_clearness: clearness multiplier (default: 1).

    Returns:
        Two lists (direct normal, diffuse horizontal) matching altitudes.
    """
    # apparent solar irradiation at air mass m = 0
    MONTHLY_A = [1202, 1187, 1164, 1130, 1106, 1092, 1093, 1107, 1136,
                 1166, 1190, 1204]
    # atmospheric extinction coefficient
    MONTHLY_B = [0.141, 0.142, 0.149, 0.164, 0.177, 0.185, 0.186, 0.182,
                 0.165, 0.152, 0.144, 0.141]
    a_coeff = MONTHLY_A[month - 1]
    b_coeff = MONTHLY_B[month - 1]
    dir_norm_rad = []
    dif_horiz_rad = []
    for alt in altitudes:
        if alt <= 0:
            # night time
            dir_norm_rad.append(0)
            dif_horiz_rad.append(0)
            continue
        try:
            sin_alt = math.sin(math.radians(alt))
            dir_norm = a_coeff / (math.exp(b_coeff / sin_alt))
        except OverflowError:
            # very small altitude values
            dir_norm_rad.append(0)
            dif_horiz_rad.append(0)
            continue
        dir_norm_rad.append(dir_norm * sky_clearness)
        dif_horiz_rad.append(0.17 * dir_norm * sin_alt * sky_clearness)
    return dir_norm_rad, dif_horiz_rad
Calculate solar flux for an original ASHRAE Clear Sky
354
14
236,496
def zhang_huang_solar(alt, cloud_cover, relative_humidity, dry_bulb_present,
                      dry_bulb_t3_hrs, wind_speed, irr_0=1355):
    """Calculate global horizontal solar irradiance using the Zhang-Huang model.

    Args:
        alt: solar altitude in degrees.
        cloud_cover: cloud cover in tenths (0-10).
        relative_humidity: relative humidity in percent.
        dry_bulb_present: dry bulb temperature at the current hour.
        dry_bulb_t3_hrs: dry bulb temperature three hours earlier.
        wind_speed: wind speed in m/s.
        irr_0: extraterrestrial irradiance (default: 1355).

    Returns:
        Global horizontal irradiance; 0 at night or when the regression
        would yield a negative value.
    """
    # zhang-huang solar model regression constants
    C0, C1, C2, C3, C4, C5 = 0.5598, 0.4982, -0.6762, 0.02842, -0.00317, 0.014
    D_COEFF, K_COEFF = -17.853, 0.843
    if alt <= 0:
        # night time
        return 0
    sin_alt = math.sin(math.radians(alt))
    cc = cloud_cover / 10.0
    glob_ir = ((irr_0 * sin_alt *
                (C0 + (C1 * cc) + (C2 * cc ** 2) +
                 (C3 * (dry_bulb_present - dry_bulb_t3_hrs)) +
                 (C4 * relative_humidity) + (C5 * wind_speed))) +
               D_COEFF) / K_COEFF
    return glob_ir if glob_ir > 0 else 0
Calculate global horizontal solar irradiance using the Zhang - Huang model .
334
15
236,497
def zhang_huang_solar_split(altitudes, doys, cloud_cover, relative_humidity,
                            dry_bulb_present, dry_bulb_t3_hrs, wind_speed,
                            atm_pressure, use_disc=False):
    """Calculate direct and diffuse solar irradiance using the Zhang-Huang model.

    Returns two lists (direct normal, diffuse horizontal) matching the
    length of the hourly inputs. The global irradiance is split with
    either the dirint (Perez) method or, when use_disc is True, the
    disc method.
    """
    # Calculate global horizontal irradiance using the original zhang-huang model
    glob_ir = [
        zhang_huang_solar(altitudes[i], cloud_cover[i], relative_humidity[i],
                          dry_bulb_present[i], dry_bulb_t3_hrs[i],
                          wind_speed[i])
        for i in range(len(altitudes))]
    if use_disc is False:
        # Calculate dew point temperature to improve the splitting of direct + diffuse
        temp_dew = [dew_point_from_db_rh(dry_bulb_present[i], relative_humidity[i])
                    for i in range(len(glob_ir))]
        # Split global rad into direct + diffuse using dirint method (aka. Perez split)
        dir_norm_rad = dirint(glob_ir, altitudes, doys, atm_pressure,
                              use_delta_kt_prime=True, temp_dew=temp_dew)
        # Calculate diffuse horizontal from dni and ghi.
        dif_horiz_rad = [
            glob_ir[i] - (dir_norm_rad[i] * math.sin(math.radians(altitudes[i])))
            for i in range(len(glob_ir))]
    else:
        dir_norm_rad = []
        dif_horiz_rad = []
        for i in range(len(glob_ir)):
            dni, kt, am = disc(glob_ir[i], altitudes[i], doys[i],
                               atm_pressure[i])
            dir_norm_rad.append(dni)
            dif_horiz_rad.append(
                glob_ir[i] - (dni * math.sin(math.radians(altitudes[i]))))
    return dir_norm_rad, dif_horiz_rad
Calculate direct and diffuse solar irradiance using the Zhang - Huang model .
508
16
236,498
def calc_horizontal_infrared(sky_cover, dry_bulb, dew_point):
    """Calculate horizontal infrared radiation intensity.

    Args:
        sky_cover: opaque sky cover in tenths (0-10).
        dry_bulb: dry bulb temperature in C.
        dew_point: dew point temperature in C.

    Returns:
        Horizontal infrared radiation intensity in W/m2.
    """
    # stefan-boltzmann constant
    SIGMA = 5.6697e-8
    # convert to kelvin
    db_k = dry_bulb + 273.15
    dp_k = dew_point + 273.15
    # sky emissivity from dew point, with a cubic sky-cover correction
    sky_emiss = (0.787 + (0.764 * math.log(dp_k / 273.15))) * \
        (1 + (0.022 * sky_cover) - (0.0035 * (sky_cover ** 2)) +
         (0.00028 * (sky_cover ** 3)))
    return sky_emiss * SIGMA * (db_k ** 4)
Calculate horizontal infrared radiation intensity .
183
8
236,499
def set_domain(self, values):
    """Set domain of the colors based on min and max of a list of values."""
    sorted_values = sorted(flatten(values))
    lowest, highest = sorted_values[0], sorted_values[-1]
    # Substitute the 'min'/'max' placeholders with the actual extremes.
    self.domain = tuple(lowest if d == 'min' else d for d in self.domain)
    self.domain = tuple(highest if d == 'max' else d for d in self.domain)
Set domain of the colors based on min and max of a list of values .
83
16