signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _get_cols ( fields , schema ) :
"""Get column metadata for Google Charts based on field list and schema .""" | typemap = { 'STRING' : 'string' , 'INT64' : 'number' , 'INTEGER' : 'number' , 'FLOAT' : 'number' , 'FLOAT64' : 'number' , 'BOOL' : 'boolean' , 'BOOLEAN' : 'boolean' , 'DATE' : 'date' , 'TIME' : 'timeofday' , 'DATETIME' : 'datetime' , 'TIMESTAMP' : 'datetime' }
cols = [ ]
for col in fields :
if schema :
f = schema [ col ]
t = 'string' if f . mode == 'REPEATED' else typemap . get ( f . data_type , 'string' )
cols . append ( { 'id' : f . name , 'label' : f . name , 'type' : t } )
else : # This will only happen if we had no rows to infer a schema from , so the type
# is not really important , except that GCharts will choke if we pass such a schema
# to a chart if it is string x string so we default to number .
cols . append ( { 'id' : col , 'label' : col , 'type' : 'number' } )
return cols |
def _load_local_tzinfo():
    """Load zoneinfo from local disk.

    Walks the directory named by the TZDIR environment variable (default
    ``/usr/share/zoneinfo/posix``) and builds a pytz tzinfo object from every
    file found.

    :return: dict mapping zone name (path relative to TZDIR) -> tzinfo object
    """
    tzdir = os.environ.get("TZDIR", "/usr/share/zoneinfo/posix")
    localtzdata = {}
    for dirpath, _, filenames in os.walk(tzdir):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            name = os.path.relpath(filepath, tzdir)
            # Context manager guarantees the handle is closed even if
            # build_tzinfo raises (the original leaked it on error).
            with open(filepath, "rb") as f:
                localtzdata[name] = pytz.tzfile.build_tzinfo(name, f)
    return localtzdata
def map_statements(self):
    """Run the ontology mapping on the statements.

    Mutates each statement's agents in place: for every existing grounding
    in ``agent.db_refs``, mappings are looked up via ``self._map_id`` and
    added to ``db_refs`` — existing entries are never overwritten.
    """
    for stmt in self.statements:
        for agent in stmt.agent_list():
            if agent is None:
                continue
            # Collect all mappings derived from the agent's current groundings.
            all_mappings = []
            for db_name, db_id in agent.db_refs.items():
                if isinstance(db_id, list):
                    # Scored groundings are stored as [(id, score), ...];
                    # map using the top-ranked id.
                    db_id = db_id[0][0]
                mappings = self._map_id(db_name, db_id)
                all_mappings += mappings
            for map_db_name, map_db_id, score, orig_db_name in all_mappings:
                # Never overwrite a grounding the agent already has.
                if map_db_name in agent.db_refs:
                    continue
                if self.scored:
                    # If the original one is a scored grounding, we take that
                    # score and multiply it with the mapping score. Otherwise
                    # we assume the original score is 1.
                    try:
                        orig_score = agent.db_refs[orig_db_name][0][1]
                    except Exception:
                        orig_score = 1.0
                    agent.db_refs[map_db_name] = [(map_db_id, score * orig_score)]
                else:
                    if map_db_name in ('UN', 'HUME'):
                        # NOTE(review): UN/HUME appear to use scored-list
                        # groundings even in unscored mode — confirm.
                        agent.db_refs[map_db_name] = [(map_db_id, 1.0)]
                    else:
                        agent.db_refs[map_db_name] = map_db_id
def _get_perf(text, msg_id):
    """Return a request message for a given text."""
    # Build the payload first, then wrap it in the REQUEST performative.
    content = KQMLList('run-text')
    content.sets('text', text)
    msg = KQMLPerformative('REQUEST')
    msg.set('receiver', 'READER')
    msg.set('content', content)
    msg.set('reply-with', msg_id)
    return msg
def get_first_properties(elt, keys=None, ctx=None):
    """Get first properties related to one input key.

    :param elt: first property elt. Not None methods.
    :param list keys: property keys to get.
    :param ctx: elt ctx from where get properties. Equals elt if None. It
        allows to get function properties related to a class or instance if
        related function is defined in base class.
    :return: dict of first values of elt properties.
    """
    # Normalize a single key into a one-element tuple so that downstream
    # code always receives an iterable (or None).
    if isinstance(keys, string_types):
        keys = (keys,)
    return _get_properties(elt, keys=keys, first=True, ctx=ctx)
def get_logger(cls):
    """Initializes and returns our logger instance."""
    if cls.logger is None:
        # Lazily create the shared logger; the NullHandler keeps the
        # "no handlers found" warning away when the app configures nothing.
        logger = logging.getLogger("django_auth_ldap")
        logger.addHandler(logging.NullHandler())
        cls.logger = logger
    return cls.logger
def create_shipment(self, data=None):
    """Create a shipment for an order. When no data arg is given, a shipment for all order lines is assumed."""
    if data is None:
        # An empty 'lines' list tells the API to ship every order line.
        data = {'lines': []}
    shipments = Shipments(self.client)
    return shipments.on(self).create(data)
def set_guest_access(self, allow_guests):
    """Set whether guests can join the room and return True if successful."""
    if allow_guests:
        guest_access = "can_join"
    else:
        guest_access = "forbidden"
    try:
        self.client.api.set_guest_access(self.room_id, guest_access)
    except MatrixRequestError:
        # Server rejected the change; local state stays untouched.
        return False
    self.guest_access = allow_guests
    return True
def order_manually(sub_commands):
    """Order sub-commands for display"""
    manual_order = ["start", "projects"]
    # Identity map so entries can be removed as they are placed.
    remaining = {cmd: cmd for cmd in sub_commands}
    ordered = []
    for key in manual_order:
        # A key absent from the commands still contributes an empty
        # placeholder entry, matching the display layout.
        ordered.append(remaining.pop(key, ""))
    # Add commands not present in `manual_order` above.
    ordered.extend(remaining.values())
    return ordered
def _parse_sheet(workbook, sheet):
    """The universal spreadsheet parser. Parse chron or paleo tables of type
    ensemble/model/summary.

    :param obj workbook: Excel Workbook
    :param dict sheet: Sheet path and naming info ('old_name', 'new_name',
        'filename', 'paleo_chron' keys are read here)
    :return tuple: (table metadata dict, numeric data dict) — both None when
        the sheet yielded no data rows
    """
    logger_excel.info("enter parse_sheet: {}".format(sheet["old_name"]))
    # Markers to track where we are on the sheet
    ensemble_on = False
    var_header_done = False
    metadata_on = False
    metadata_done = False
    data_on = False
    # Open the sheet from the workbook
    temp_sheet = workbook.sheet_by_name(sheet["old_name"])
    filename = sheet["filename"]
    # Store table metadata and numeric data separately
    table_name = "{}DataTableName".format(sheet["paleo_chron"])
    # Organize our root table data
    table_metadata = OrderedDict()
    table_metadata[table_name] = sheet["new_name"]
    table_metadata['filename'] = filename
    table_metadata['missingValue'] = 'nan'
    if "ensemble" in sheet["new_name"]:
        ensemble_on = True
    # Store all CSV in here by rows
    table_data = {filename: []}
    # Master list of all column metadata
    column_metadata = []
    # Index tracks which cells are being parsed
    num_col = 0
    num_row = 0
    nrows = temp_sheet.nrows
    col_total = 0
    # Tracks which "number" each metadata column is assigned
    col_add_ct = 1
    header_keys = []
    variable_keys = []
    variable_keys_lower = []
    mv = ""
    try:
        # Loop for every row in the sheet
        for _ in range(0, nrows):
            # Hold the contents of the current cell
            cell = temp_sheet.cell_value(num_row, num_col)
            row = temp_sheet.row(num_row)
            # Skip all template lines
            if isinstance(cell, str):
                # Note and missing value entries are rogue. They are not
                # close to the other data entries.
                if cell.lower().strip() not in EXCEL_TEMPLATE:
                    if "notes" in cell.lower() and not metadata_on:
                        # Store at the root table level
                        nt = temp_sheet.cell_value(num_row, 1)
                        if nt not in EXCEL_TEMPLATE:
                            table_metadata["notes"] = nt
                    elif cell.lower().strip() in ALTS_MV:
                        # Store at the root table level and in our function
                        mv = temp_sheet.cell_value(num_row, 1)
                        # Add if not placeholder value
                        if mv not in EXCEL_TEMPLATE:
                            table_metadata["missingValue"] = mv
                    # Variable template header row
                    elif cell.lower() in EXCEL_HEADER and not metadata_on and not data_on:
                        # Grab the header line
                        row = temp_sheet.row(num_row)
                        header_keys = _get_header_keys(row)
                        # Turn on the marker
                        var_header_done = True
                    # Data section (bottom of sheet)
                    elif data_on:
                        # Parse the row, clean, and add to table_data
                        table_data = _parse_sheet_data_row(temp_sheet, num_row, col_total, table_data, filename, mv)
                    # Metadata section (top)
                    elif metadata_on:
                        # Reached an empty cell while parsing metadata.
                        # Mark the end of the section.
                        if cell in EMPTY:
                            metadata_on = False
                            metadata_done = True
                            # Create a list of all the variable names found
                            for entry in column_metadata:
                                try:
                                    # var keys is used as the variableName
                                    # entry in each column's metadata
                                    variable_keys.append(entry["variableName"].strip())
                                    # var keys lower is used for comparing and
                                    # finding the data header row
                                    variable_keys_lower.append(entry["variableName"].lower().strip())
                                except KeyError:
                                    # missing a variableName key
                                    pass
                        # Not at the end of the section yet. Parse the metadata
                        else:
                            row = temp_sheet.row(num_row)
                            col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
                            column_metadata.append(col_tmp)
                            col_add_ct += 1
                    # Variable metadata, if variable header exists
                    elif var_header_done and not metadata_done:
                        # Start piecing column metadata together with their
                        # respective variable keys
                        metadata_on = True
                        row = temp_sheet.row(num_row)
                        col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
                        column_metadata.append(col_tmp)
                        col_add_ct += 1
                    # Variable metadata, if variable header does not exist
                    elif not var_header_done and not metadata_done and cell:
                        # LiPD Version 1.1 and earlier: Chronology sheets don't
                        # have variable headers. We could blindly parse, but
                        # without a header row we wouldn't know where to save
                        # the metadata. Play it safe and assume data for first
                        # column only: variable name.
                        metadata_on = True
                        row = temp_sheet.row(num_row)
                        col_tmp = _compile_column_metadata(row, header_keys, col_add_ct)
                        column_metadata.append(col_tmp)
                        col_add_ct += 1
                    # Data variable header row. Column metadata exists and
                    # metadata_done marker is on. This is where we compare top
                    # section variableNames to bottom section variableNames to
                    # see if we need to start parsing the column values.
                    else:
                        try:
                            # Units in parenthesis make it too difficult to
                            # compare variables ("age (yrs BP)" vs "age").
                            # Remove them.
                            row = _rm_units_from_var_names_multi(row)
                            if metadata_done and any(k in row for k in variable_keys_lower):
                                data_on = True
                                # Take the difference of the two lists. If
                                # anything exists, then that's a problem.
                                __compare_vars(row, variable_keys_lower, sheet["old_name"])
                                # Ensemble columns are counted differently.
                                if ensemble_on:
                                    # Get the next row, and count the data cells.
                                    col_total = len(temp_sheet.row(num_row + 1))
                                    # If there's an empty row between, then try
                                    # the next row. (BUGFIX: original assigned
                                    # the row object itself, not its length.)
                                    if col_total < 2:
                                        col_total = len(temp_sheet.row(num_row + 2))
                                    try:
                                        # Ensemble column "number" is the list
                                        # of member indices 1..col_total-1.
                                        ens_cols = list(range(1, col_total))
                                        column_metadata[1]["number"] = ens_cols
                                    except IndexError:
                                        logger_excel.debug("excel: parse_sheet: unable to add ensemble 'number' key")
                                    except KeyError:
                                        logger_excel.debug("excel: parse_sheet: unable to add ensemble 'number' list at key")
                                # All other cases, columns are the length of
                                # column_metadata
                                else:
                                    col_total = len(column_metadata)
                        except AttributeError:
                            pass
            # cell is not a string, and lower() was not a valid call.
            # If this is a numeric cell, 99% chance it's parsing the data columns.
            elif isinstance(cell, float) or isinstance(cell, int):
                if data_on or metadata_done:
                    # Parse the row, clean, and add to table_data
                    table_data = _parse_sheet_data_row(temp_sheet, num_row, col_total, table_data, filename, mv)
            # Move on to the next row
            num_row += 1
        table_metadata["columns"] = column_metadata
    except IndexError as e:
        logger_excel.debug("parse_sheet: IndexError: sheet: {}, row_num: {}, col_num: {}, {}".format(sheet, num_row, num_col, e))
    # If there isn't any data in this sheet, and nothing was parsed, don't let
    # this move forward to final output.
    if not table_data[filename]:
        table_data = None
        table_metadata = None
    logger_excel.info("exit parse_sheet")
    return table_metadata, table_data
def get_deffacts(self):
    """Return the existing deffacts sorted by the internal order"""
    all_deffacts = list(self._get_by_type(DefFacts))
    # Sort in place on the declared ordering attribute.
    all_deffacts.sort(key=lambda item: item.order)
    return all_deffacts
def import_path(self):
    """The full remote import path as used in import statements in `.go` source files."""
    # A package subpath is appended only when one is set.
    if self.pkg:
        return os.path.join(self.remote_root, self.pkg)
    return self.remote_root
def chunkify(chunksize):
    """Very stupid "chunk vectorizer" which keeps memory use down.

    This version requires all inputs to have the same number of elements,
    although it shouldn't be that hard to implement simple broadcasting.

    :param chunksize: number of elements processed per call to the wrapped
        function
    :return: decorator that applies `func` chunk-by-chunk over flat arrays
    """
    def chunkifier(func):
        def wrap(*args):
            assert len(args) > 0
            assert all(len(a.flat) == len(args[0].flat) for a in args)
            nelements = len(args[0].flat)
            out = np.ndarray(args[0].shape)
            for start in range(0, nelements, chunksize):
                # BUGFIX: the original set stop = nelements - start for the
                # final partial chunk, producing an empty slice and leaving
                # the trailing elements uncomputed. Clamp to nelements.
                stop = min(start + chunksize, nelements)
                iargs = tuple(a.flat[start:stop] for a in args)
                out.flat[start:stop] = func(*iargs)
            return out
        return wrap
    return chunkifier
def from_string(cls, line, ignore_bad_cookies=False, ignore_bad_attributes=True):
    "Construct a Cookie object from a line of Set-Cookie header data."
    parsed = parse_one_response(
        line,
        ignore_bad_cookies=ignore_bad_cookies,
        ignore_bad_attributes=ignore_bad_attributes,
    )
    # An unparseable line yields no dict, hence no Cookie.
    if parsed:
        return cls.from_dict(parsed, ignore_bad_attributes=ignore_bad_attributes)
    return None
def from_dict(data, ctx):
    """Instantiate a new Trade from a dict (generally from loading a JSON
    response). The data used to instantiate the Trade is a shallow copy of
    the dict passed in, with any complex child types instantiated
    appropriately.

    :param data: dict of Trade fields, e.g. parsed JSON
    :param ctx: API context providing number conversion and child-type
        constructors
    :return: a new Trade
    """
    data = data.copy()
    # All decimal-number fields share one conversion path; the original
    # spelled out nine identical if-blocks.
    for key in ('price', 'initialUnits', 'initialMarginRequired',
                'currentUnits', 'realizedPL', 'unrealizedPL', 'marginUsed',
                'averageClosePrice', 'financing'):
        if data.get(key) is not None:
            data[key] = ctx.convert_decimal_number(data[key])
    # Complex child objects are instantiated through their own from_dict.
    if data.get('clientExtensions') is not None:
        data['clientExtensions'] = ctx.transaction.ClientExtensions.from_dict(
            data['clientExtensions'], ctx)
    if data.get('takeProfitOrder') is not None:
        data['takeProfitOrder'] = ctx.order.TakeProfitOrder.from_dict(
            data['takeProfitOrder'], ctx)
    if data.get('stopLossOrder') is not None:
        data['stopLossOrder'] = ctx.order.StopLossOrder.from_dict(
            data['stopLossOrder'], ctx)
    if data.get('trailingStopLossOrder') is not None:
        data['trailingStopLossOrder'] = ctx.order.TrailingStopLossOrder.from_dict(
            data['trailingStopLossOrder'], ctx)
    return Trade(**data)
def enable_chimera(verbose=False, nogui=True):
    """Bypass script loading and initialize Chimera correctly, once
    the env has been properly patched.

    Parameters
    ----------
    verbose : bool, optional, default=False
        If True, let Chimera speak freely. It can be _very_ verbose.
    nogui : bool, optional, default=True
        Don't start the GUI.
    """
    # Guard against double initialization within the same process.
    if os.getenv('CHIMERA_ENABLED'):
        return
    import chimera
    # Patches that must land before any Chimera machinery runs.
    _pre_gui_patches()
    if not nogui:
        # GUI-only patches must wait until the graphics subsystem is up.
        chimera.registerPostGraphicsFunc(_post_gui_patches)
    try:
        import chimeraInit
        if verbose:
            chimera.replyobj.status('initializing pychimera')
    except ImportError as e:
        sys.exit(str(e) + "\nERROR: Chimera could not be loaded!")
    # '--script NULL' bypasses Chimera's own script loading; original argv is
    # forwarded only in GUI mode. Event loop / exit-on-quit follow the GUI
    # setting.
    chimeraInit.init(['', '--script', NULL] + (sys.argv[1:] if not nogui else []), debug=verbose, silent=not verbose, nostatus=not verbose, nogui=nogui, eventloop=not nogui, exitonquit=not nogui, title=chimera.title + ' (PyChimera)')
    # Mark this process so a second call becomes a no-op.
    os.environ['CHIMERA_ENABLED'] = '1'
def validate(self, instance, value):
    """Check if value is a valid datetime object or JSON datetime string"""
    if isinstance(value, datetime.datetime):
        # Already a datetime; nothing to convert.
        return value
    if not isinstance(value, string_types):
        # self.error is expected to raise for invalid input.
        self.error(
            instance=instance,
            value=value,
            extra='Cannot convert non-strings to datetime.',
        )
    try:
        parsed = self.from_json(value)
    except ValueError:
        self.error(
            instance=instance,
            value=value,
            extra='Invalid format for converting to datetime.',
        )
    else:
        return parsed
def unitcheck(u, nonperiodic=None):
    """Check whether `u` is inside the unit cube. Given a masked array
    `nonperiodic`, also allows periodic boundaries conditions to exceed
    the unit cube."""
    if nonperiodic is None:
        # No periodic boundary conditions provided.
        return np.all(u > 0.) and np.all(u < 1.)
    # Dimensions flagged by the mask must stay strictly inside (0, 1);
    # the remaining (periodic) dimensions may extend half a cube length
    # beyond either face.
    strict_ok = np.all(u[nonperiodic] > 0.) and np.all(u[nonperiodic] < 1.)
    periodic_ok = np.all(u[~nonperiodic] > -0.5) and np.all(u[~nonperiodic] < 1.5)
    return strict_ok and periodic_ok
def get_constrained_fc2(supercell, dataset_second_atoms, atom1, reduced_site_sym, symprec):
    """Solve second-order force constants from second-atom displacement data.

    dataset_second_atoms: [{'number': 7,
                            'displacement': [],
                            'delta_forces': []}, ...]

    Returns an (natom, natom, 3, 3) 'double' array of force constants,
    symmetrized over the reduced site symmetry around atom1.
    """
    lattice = supercell.get_cell().T
    positions = supercell.get_scaled_positions()
    num_atom = supercell.get_number_of_atoms()
    fc2 = np.zeros((num_atom, num_atom, 3, 3), dtype='double')
    # Unique list of the second atoms that were displaced.
    atom_list = np.unique([x['number'] for x in dataset_second_atoms])
    for atom2 in atom_list:
        disps2 = []
        sets_of_forces = []
        for disps_second in dataset_second_atoms:
            if atom2 != disps_second['number']:
                continue
            # Site symmetry reduced to operations leaving the atom1-atom2
            # bond invariant.
            bond_sym = get_bond_symmetry(reduced_site_sym, lattice, positions, atom1, atom2, symprec)
            disps2.append(disps_second['displacement'])
            sets_of_forces.append(disps_second['delta_forces'])
        # Fills fc2[:, atom2] in place from the collected displacement/force
        # pairs. NOTE(review): bond_sym is the value from the last matching
        # entry — presumably identical for all entries of one atom2; confirm.
        solve_force_constants(fc2, atom2, disps2, sets_of_forces, supercell, bond_sym, symprec)
    # Shift positions according to set atom1 is at origin
    pos_center = positions[atom1].copy()
    positions -= pos_center
    # Distribute the solved constants to symmetry-equivalent atom pairs.
    rotations = np.array(reduced_site_sym, dtype='intc', order='C')
    translations = np.zeros((len(reduced_site_sym), 3), dtype='double', order='C')
    permutations = compute_all_sg_permutations(positions, rotations, translations, lattice, symprec)
    distribute_force_constants(fc2, atom_list, lattice, rotations, permutations)
    return fc2
def check_authorization(self, access_token):
    """OAuth applications can use this method to check token validity
    without hitting normal rate limits because of failed login attempts.
    If the token is valid, it will return True, otherwise it will return
    False.

    :returns: bool
    """
    session_params = self._session.params
    auth = (session_params.get('client_id'), session_params.get('client_secret'))
    if not (access_token and auth):
        return False
    url = self._build_url('applications', str(auth[0]), 'tokens',
                          str(access_token))
    # Blank out the session's client credentials for this request; they are
    # sent via basic auth instead.
    resp = self._get(url, auth=auth,
                     params={'client_id': None, 'client_secret': None})
    return self._boolean(resp, 200, 404)
def computePhase2(self, doLearn=False):
    """This is the phase 2 of learning, inference and multistep prediction. During
    this phase, all the cell with lateral support have their predictedState
    turned on and the firing segments are queued up for updates.

    Parameters:
    --------------------------------------------
    doLearn:  Boolean flag to queue segment updates during learning
    retval:   None
    """
    # Phase 2: compute predicted state for each cell
    #  - if a segment has enough horizontal connections firing because of
    #    bottomUpInput, it's set to be predicting, and we queue up the segment
    #    for reinforcement,
    #  - if pooling is on, try to find the best weakly activated segment to
    #    reinforce it, else create a new pooling segment.
    for c in xrange(self.numberOfCols):
        # whether any cell in the column is predicted
        buPredicted = False
        for i in xrange(self.cellsPerColumn):
            # Iterate over each of the segments of this cell
            maxConfidence = 0
            for s in self.cells[c][i]:
                # sum(connected synapses) >= activationThreshold?
                if self.isSegmentActive(s, self.activeState['t']):
                    self.predictedState['t'][c, i] = 1
                    buPredicted = True
                    # Read-only duty cycle: don't update the running average here.
                    maxConfidence = max(maxConfidence, s.dutyCycle(readOnly=True))
                    if doLearn:
                        # increment activationFrequency
                        s.totalActivations += 1
                        s.lastActiveIteration = self.iterationIdx
                        # mark this segment for learning
                        activeUpdate = self.getSegmentActiveSynapses(c, i, s, 't')
                        activeUpdate.phase1Flag = False
                        self.addToSegmentUpdates(c, i, activeUpdate)
            # Store the max confidence seen among all the weak and strong segments
            # as the cell's confidence.
            self.confidence['t'][c, i] = maxConfidence
def connect(self, address):
    """Equivalent to socket.connect(), but sends an client handshake request
    after connecting.

    `address` is a (host, port) tuple of the server to connect to."""
    self.sock.connect(address)
    # Immediately perform the WebSocket opening handshake on the new
    # connection.
    handshake = ClientHandshake(self)
    handshake.perform()
    self.handshake_sent = True
def deprecate(message):
    """Loudly prints warning."""
    # Temporarily install the 'default' filter so the DeprecationWarning is
    # actually shown (it is ignored by default), then clear the filter list.
    warnings.simplefilter('default')
    warnings.warn(message, category=DeprecationWarning)
    warnings.resetwarnings()
def call_id_function(opts):
    '''Evaluate the function that determines the ID if the 'id_function'
    option is set and return the result'''
    # An explicitly configured ID always wins over the id_function.
    if opts.get('id'):
        return opts['id']
    # Import 'salt.loader' here to avoid a circular dependency
    import salt.loader as loader
    # 'id_function' may be a plain "module.function" string, or a one-entry
    # dict mapping "module.function" -> kwargs.
    if isinstance(opts['id_function'], six.string_types):
        mod_fun = opts['id_function']
        fun_kwargs = {}
    elif isinstance(opts['id_function'], dict):
        mod_fun, fun_kwargs = six.next(six.iteritems(opts['id_function']))
        if fun_kwargs is None:
            fun_kwargs = {}
    else:
        log.error('\'id_function\' option is neither a string nor a dictionary')
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    # split module and function and try loading the module
    mod, fun = mod_fun.split('.')
    if not opts.get('grains'):
        # Get grains for use by the module
        opts['grains'] = loader.grains(opts)
    try:
        id_mod = loader.raw_mod(opts, mod, fun)
        if not id_mod:
            # Re-use the KeyError handler below for "module not found".
            raise KeyError
        # we take whatever the module returns as the minion ID
        newid = id_mod[mod_fun](**fun_kwargs)
        # The ID must be a non-empty string; anything else is fatal.
        if not isinstance(newid, six.string_types) or not newid:
            log.error('Function %s returned value "%s" of type %s instead of string', mod_fun, newid, type(newid))
            sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        log.info('Evaluated minion ID from module: %s', mod_fun)
        return newid
    except TypeError:
        # Signature mismatch between configured kwargs and the function.
        log.error('Function arguments %s are incorrect for function %s', fun_kwargs, mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
    except KeyError:
        log.error('Failed to load module %s', mod_fun)
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def h_boiling_Yan_Lin(m, x, Dh, rhol, rhog, mul, kl, Hvap, Cpl, q,
                      A_channel_flow):
    r'''Calculates the two-phase boiling heat transfer coefficient of a
    liquid and gas flowing inside a plate and frame heat exchanger, as
    developed in [1]_. Reviewed in [2]_, [3]_, [4]_, and [5]_.

    .. math::
        h = 1.926\left(\frac{k_l}{D_h}\right) Re_{eq} Pr_l^{1/3}
        Bo_{eq}^{0.3} Re^{-0.5}

        Re_{eq} = \frac{G_{eq} D_h}{\mu_l}

        Bo_{eq} = \frac{q}{G_{eq} H_{vap}}

        G_{eq} = \frac{m}{A_{flow}}\left[1 - x + x\left(\frac{\rho_l}
        {\rho_g}\right)^{1/2}\right]

        Re = \frac{G D_h}{\mu_l}

    Claimed to be valid for :math:`2000 < Re_{eq} < 10000`.

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    x : float
        Quality at the specific point in the plate exchanger [-]
    Dh : float
        Hydraulic diameter of the plate [m]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]
    mul : float
        Viscosity of the liquid [Pa*s]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    Hvap : float
        Heat of vaporization of the fluid at the system pressure [J/kg]
    Cpl : float
        Heat capacity of liquid [J/kg/K]
    q : float
        Heat flux [W/m^2]
    A_channel_flow : float
        The flow area for the fluid,
        :math:`A_{ch} = 2\cdot\text{width}\cdot\text{amplitude}` [m^2]

    Returns
    -------
    h : float
        Boiling heat transfer coefficient [W/m^2/K]

    Notes
    -----
    Developed with R134a as the refrigerant in a PHE with 2 channels, chevron
    angle 60 degrees, quality from 0.1 to 0.8, heat flux 11-15 kW/m^2, and
    mass fluxes of 55 and 70 kg/m^2/s.

    Examples
    --------
    >>> h_boiling_Yan_Lin(m=3E-5, x=.4, Dh=0.002, rhol=567., rhog=18.09,
    ... kl=0.086, Cpl=2200, mul=156E-6, Hvap=9E5, q=1E5, A_channel_flow=0.0003)
    318.7228565961241

    References
    ----------
    .. [1] Yan, Y.-Y., and T.-F. Lin. "Evaporation Heat Transfer and Pressure
       Drop of Refrigerant R-134a in a Plate Heat Exchanger." Journal of Heat
       Transfer 121, no. 1 (February 1, 1999): 118-27. doi:10.1115/1.2825924.
    .. [2] Amalfi, Raffaele L., Farzad Vakili-Farahani, and John R. Thome.
       "Flow Boiling and Frictional Pressure Gradients in Plate Heat
       Exchangers. Part 1: Review and Experimental Database." International
       Journal of Refrigeration 61 (January 2016): 166-84.
    .. [3] Eldeeb, Radia, Vikrant Aute, and Reinhard Radermacher. "A Survey of
       Correlations for Heat Transfer and Pressure Drop for Evaporation and
       Condensation in Plate Heat Exchangers." International Journal of
       Refrigeration 65 (May 2016): 12-26.
    .. [4] Garcia-Cascales, J. R., F. Vera-Garcia, J. M. Corberan-Salvador,
       and J. Gonzalvez-Macia. "Assessment of Boiling and Condensation Heat
       Transfer Correlations in the Modelling of Plate Heat Exchangers."
       International Journal of Refrigeration 30, no. 6 (September 2007):
       1029-41.
    .. [5] Huang, Jianchang. "Performance Analysis of Plate Heat Exchangers
       Used as Refrigerant Evaporators," 2011. Thesis.
    '''
    # Channel mass flux and its "equivalent" all-liquid value.
    G = m/A_channel_flow
    G_eq = G*((1. - x) + x*(rhol/rhog)**0.5)
    # Equivalent and liquid-only Reynolds numbers.
    Re_eq = G_eq*Dh/mul
    Re = G*Dh/mul
    # Not actually specified clearly but it is in another paper by them
    Bo_eq = q/(G_eq*Hvap)
    Pr_l = Prandtl(Cp=Cpl, k=kl, mu=mul)
    h = 1.926*(kl/Dh)*Re_eq*Pr_l**(1/3.)*Bo_eq**0.3*Re**-0.5
    return h
def parse_rule(rule):
    """Parse a rule and return it as generator. Each iteration yields tuples
    in the form ``(converter, arguments, variable)``. If the converter is
    `None` it's a static url part, otherwise it's a dynamic one.

    :internal:
    """
    pos = 0
    end = len(rule)
    used_names = set()
    while pos < end:
        match = _rule_re.match(rule, pos)
        if match is None:
            break
        groups = match.groupdict()
        # Static text preceding the placeholder, if any.
        if groups["static"]:
            yield None, None, groups["static"]
        variable = groups["variable"]
        converter = groups["converter"] or "default"
        if variable in used_names:
            raise ValueError("variable name %r used twice." % variable)
        used_names.add(variable)
        yield converter, groups["args"] or None, variable
        pos = match.end()
    # Whatever is left over is a trailing static part — unless it contains
    # stray angle brackets, which indicate a malformed rule.
    if pos < end:
        remaining = rule[pos:]
        if ">" in remaining or "<" in remaining:
            raise ValueError("malformed url rule: %r" % rule)
        yield None, None, remaining
def put_stream(self, bucket, label, stream_object, params=None, replace=True, add_md=True):
    '''Put a bitstream (stream_object) for the specified bucket:label identifier.

    :param bucket: as standard
    :param label: as standard
    :param stream_object: file-like object to read from or bytestring.
    :param params: update metadata with these params (see `update_metadata`)
    :param replace: if True and the entry exists, delete the old zip member
        after writing the new one
    :param add_md: if True, persist the accumulated params via update_metadata
    :return: the final params dict
    '''
    if self.mode == "r":
        raise OFSException("Cannot write into archive in 'r' mode")
    else:
        params = params or {}
        fn = self._zf(bucket, label)
        # ISO timestamp with microseconds stripped, e.g. '2010-07-08T19:56:47'
        params['_creation_date'] = datetime.now().isoformat().split(".")[0]
        params['_label'] = label
        if self.exists(bucket, label) and replace == True:
            # Add then Replace? Let's see if that works...
            # Grab the old member's info BEFORE writing, so the new entry is
            # not the one deleted below.
            # z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
            zinfo = self.z.getinfo(fn)
            size, chksum = self._write(self.z, bucket, label, stream_object)
            self._del_stream(zinfo)
            # z.close()
            params['_content_length'] = size
            if chksum:
                params['_checksum'] = chksum
        else:
            # Fresh entry: just write it.
            # z = ZipFile(self.zipfile, self.mode, self.compression, self.allowZip64)
            size, chksum = self._write(self.z, bucket, label, stream_object)
            # z.close()
            params['_content_length'] = size
            if chksum:
                params['_checksum'] = chksum
        if add_md:
            params = self.update_metadata(bucket, label, params)
        return params
def forwards(self, orm):
    "Write your forwards methods here."
    for tag in orm['tagging.Tag'].objects.all():
        # Skip tags that already have a translated title.
        if tag.tagtitle_set.all().count() != 0:
            continue
        orm['tagging_translated.TagTitle'].objects.create(
            trans_name=tag.name, tag=tag, language='en')
def finish(self, data=''):
    '''Optionally add pending data, turn off streaming mode, and yield
    result chunks, which implies all pending data will be consumed.

    :yields: result chunks
    :ytype: str
    '''
    self.pending += data
    self.streaming = False
    # Iterating self drains and yields whatever remains pending.
    for chunk in self:
        yield chunk
def identifier_director(**kwargs):
    """Direct how to handle the identifier element.

    Keyword Args:
        ark: ARK identifier string.
        domain_name: host used to build a permalink URL.
        scheme: URL scheme; defaults to 'http' when missing or falsy.
        qualifier: identifier kind ('ark', 'permalink', or other label).
        content: raw identifier text used when no ARK handling applies.

    :return: a DCIdentifier wrapping the resolved content.
    """
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    # Set default scheme if it is None or is not supplied.
    scheme = kwargs.get('scheme') or 'http'
    qualifier = kwargs.get('qualifier', None)
    content = kwargs.get('content', '')
    # See if the ark and domain name were given.
    if ark and qualifier == 'ark':
        content = 'ark: %s' % ark
    if domain_name and ark and qualifier == 'permalink':
        # Create the permalink URL.
        if not domain_name.endswith('/'):
            domain_name += '/'
        permalink_url = '%s://%s%s' % (scheme, domain_name, ark)
        # Make sure it has a trailing slash.
        if not permalink_url.endswith('/'):
            permalink_url += '/'
        content = permalink_url
    else:
        if qualifier:
            # BUGFIX: string.lower() was removed in Python 3; the bound
            # str method is equivalent on both Python 2 and 3.
            content = '%s: %s' % (qualifier.lower(), content)
    return DCIdentifier(content=content)
def default_indexes(
    coords: 'Mapping[Any, Variable]',
    dims: 'Iterable',
) -> 'OrderedDict[Any, pd.Index]':
    """Default indexes for a Dataset/DataArray.

    Parameters
    ----------
    coords : Mapping[Any, xarray.Variable]
        Coordinate variables from which to draw default indexes.
    dims : iterable
        Iterable of dimension names.

    Returns
    -------
    Mapping from indexing keys (levels/dimension names) to indexes used for
    indexing along that dimension.
    """
    indexes = OrderedDict()
    # Only dimensions that actually have a coordinate variable get an index.
    for dim in dims:
        if dim in coords:
            indexes[dim] = coords[dim].to_index()
    return indexes
def update_app(self, app_id, app, force=False, minimal=True):
    """Update an app.

    Applies writable settings in `app` to `app_id`.
    Note: this method can not be used to rename apps.

    :param str app_id: target application ID
    :param app: application settings
    :type app: :class:`marathon.models.app.MarathonApp`
    :param bool force: apply even if a deployment is in progress
    :param bool minimal: ignore nulls and empty collections

    :returns: a dict containing the deployment id and version
    :rtype: dict
    """
    # Changes won't take if version is set - blank it for convenience
    app.version = None
    payload = app.to_json(minimal=minimal)
    response = self._do_request(
        'PUT',
        '/v2/apps/{app_id}'.format(app_id=app_id),
        params={'force': force},
        data=payload,
    )
    return response.json()
def variables(self):
    """Display a list of templatable variables present in the file.

    Templating is accomplished by creating a bracketed object in the same
    way that Python performs `string formatting`_. The editor is able to
    replace the placeholder value of the template. Integer templates are
    positional arguments.

    .. _string formatting: https://docs.python.org/3.6/library/string.html

    :returns: sorted list of '{name}' placeholders, excluding escaped
        '{{name}}' occurrences.
    """
    text = str(self)
    # BUGFIX: the original patterns used '[A-z]', which also matches the
    # punctuation characters between 'Z' and 'a' in ASCII; '[A-Za-z0-9]'
    # is the intended class.  The escaped-brace pattern also lacked a
    # quantifier, so only single-character '{{x}}' escapes were excluded.
    constants = [match[1:-1] for match in re.findall(r'{{[A-Za-z0-9]*}}', text)]
    variables = re.findall(r'{[A-Za-z0-9]*}', text)
    return sorted(set(variables).difference(constants))
def populate(cls, as_of=None):
    """Ensure the next X years of billing cycles exist.

    :param as_of: reference date; defaults to today when omitted.
    :returns: whatever ``cls._populate`` returns.
    """
    if not as_of:
        as_of = date.today()
    return cls._populate(as_of=as_of, delete=True)
def as_number(self):
    """Collapse the version tuple into a single float.

    >>> round(SummableVersion('1.9.3').as_number(), 12)
    1.93
    """
    return reduce(lambda acc, part: acc / 10 + part, reversed(self.version))
def fetch(self, R, pk, depth=1):
    """Request object from API.

    :param R: resource class to fetch
    :param pk: primary key of the object
    :param depth: how many levels of related objects to resolve
    :returns: the fetched data
    :raises: whatever error the underlying fetcher reported
    """
    data, error = self._fetcher.fetch(R, pk, depth)
    if error:
        raise error
    return data
def parse(self, **kwargs):
    """Parse the contents of the output files retrieved in the `FolderData`.

    The stderr file is parsed first; if its parser yields an exit code the
    stdout file is not parsed at all.

    :param kwargs: unused; accepted for interface compatibility.
    :returns: an ``exit_codes`` member on read failure, the exit code from
        either sub-parser when truthy, otherwise ``None`` (implicit).
    """
    try:
        output_folder = self.retrieved
    except exceptions.NotExistent:
        # Nothing was retrieved for this calculation at all.
        return self.exit_codes.ERROR_NO_RETRIEVED_FOLDER
    # Output filenames are stored as attributes on the calculation node.
    filename_stdout = self.node.get_attribute('output_filename')
    filename_stderr = self.node.get_attribute('error_filename')
    try:
        with output_folder.open(filename_stderr, 'r') as handle:
            exit_code = self.parse_stderr(handle)
    except (OSError, IOError):
        self.logger.exception('Failed to read the stderr file\n%s', traceback.format_exc())
        return self.exit_codes.ERROR_READING_ERROR_FILE
    if exit_code:
        # stderr indicated a failure; skip stdout parsing.
        return exit_code
    try:
        with output_folder.open(filename_stdout, 'r') as handle:
            handle.seek(0)
            exit_code = self.parse_stdout(handle)
    except (OSError, IOError):
        self.logger.exception('Failed to read the stdout file\n%s', traceback.format_exc())
        return self.exit_codes.ERROR_READING_OUTPUT_FILE
    if exit_code:
        return exit_code
def get_resource_url(self):
    """Return the complete REST URL for this resource.

    Combines the class-level base URL with the class resource name.
    """
    klass = self.__class__
    return "%s/%s" % (klass.rest_base_url(), klass.resource_name)
def safe_purge_collection(coll):
    """Empty a MongoDB collection safely.

    Documents cannot be removed from capped collections in later versions
    of MongoDB, so capped collections are dropped outright; regular
    collections are purged in place.
    """
    if coll.options().get('capped', False):
        return drop_collection(coll)
    return purge_collection(coll)
def export_as_package(self, package_path, cv_source):
    """Exports the ensemble as a Python package and saves it to `package_path`.

    The generated package layout is::

        <package>/__init__.py         re-exports ``xcessiv_ensemble``
        <package>/baselearners/       one module per base learner
        <package>/metalearner.py      the secondary learner
        <package>/cv.py               the provided CV source
        <package>/stacker.py          ensemble class (+ generated method)
        <package>/builder.py          instantiates ``xcessiv_ensemble``

    Args:
        package_path (str, unicode): Absolute/local path of place to save package in

        cv_source (str, unicode): String containing actual code for base learner
            cross-validation used to generate secondary meta-features.

    Raises:
        exceptions.UserError: If os.path.join(path, name) already exists.
    """
    if os.path.exists(package_path):
        raise exceptions.UserError('{} already exists'.format(package_path))
    package_name = os.path.basename(os.path.normpath(package_path))
    os.makedirs(package_path)
    # Write __init__.py
    with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
        f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
    # Create package baselearners with each base learner having its own module
    os.makedirs(os.path.join(package_path, 'baselearners'))
    open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()
    for idx, base_learner in enumerate(self.base_learners):
        base_learner.export_as_file(os.path.join(package_path, 'baselearners', 'baselearner' + str(idx)))
    # Create metalearner.py containing secondary learner
    self.base_learner_origin.export_as_file(os.path.join(package_path, 'metalearner'), self.secondary_learner_hyperparameters)
    # Create cv.py containing CV method for getting meta-features
    with open(os.path.join(package_path, 'cv.py'), 'wb') as f:
        f.write(cv_source.encode('utf8'))
    # Create stacker.py containing class for Xcessiv ensemble
    ensemble_source = ''
    stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
    with open(stacker_file_loc) as f:
        ensemble_source += f.read()
    # Append a delegating method named after the meta-feature generator
    # (e.g. predict_proba) onto the copied stacker source.
    ensemble_source += '\n\n' ' def {}(self, X):\n' ' return self._process_using_' 'meta_feature_generator(X, "{}")\n\n'.format(self.base_learner_origin.meta_feature_generator, self.base_learner_origin.meta_feature_generator)
    with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:
        f.write(ensemble_source.encode('utf8'))
    # Create builder.py containing file where `xcessiv_ensemble` is instantiated for import
    builder_source = ''
    for idx, base_learner in enumerate(self.base_learners):
        builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx)
    builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name)
    builder_source += 'from {} import metalearner\n'.format(package_name)
    builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name)
    builder_source += '\nbase_learners = [\n'
    for idx, base_learner in enumerate(self.base_learners):
        builder_source += ' baselearner{}.base_learner,\n'.format(idx)
    builder_source += ']\n'
    builder_source += '\nmeta_feature_generators = [\n'
    for idx, base_learner in enumerate(self.base_learners):
        builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx)
    builder_source += ']\n'
    builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' ' meta_feature_generators=meta_feature_generators,' ' secondary_learner=metalearner.base_learner,' ' cv_function=return_splits_iterable)\n'
    with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
        f.write(builder_source.encode('utf8'))
async def _move(self, target_position: 'OrderedDict[Axis, float]', speed: float = None, home_flagged_axes: bool = True):
    """Worker function to apply robot motion.

    Robot motion means the kind of motions that are relevant to the robot,
    i.e. only one pipette plunger and mount move at the same time, and an
    XYZ move in the coordinate frame of one of the pipettes.

    ``target_position`` should be an ordered dict (ordered by XYZABC)
    of deck calibrated values, containing any specified XY motion and
    at most one of a ZA or BC components. The frame in which to move
    is identified by the presence of (ZA) or (BC).

    :param target_position: ordered mapping of Axis -> deck position.
    :param speed: optional speed forwarded to the backend move.
    :param home_flagged_axes: forwarded to the backend move.
    :raises ValueError: when gantry axes are not exactly x, y and (z or a).
    """
    # Transform only the x, y, and (z or a) axes specified since this could
    # get the b or c axes as well
    to_transform = tuple((tp for ax, tp in target_position.items() if ax in Axis.gantry_axes()))
    # Pre-fill the dict we'll send to the backend with the axes we don't
    # need to transform
    smoothie_pos = {ax.name: pos for ax, pos in target_position.items() if ax not in Axis.gantry_axes()}
    # We'd better have all of (x, y, (z or a)) or none of them since the
    # gantry transform requires them all
    if len(to_transform) != 3:
        self._log.error("Move derived {} axes to transform from {}".format(len(to_transform), target_position))
        raise ValueError("Moves must specify either exactly an x, y, and " "(z or a) or none of them")
    # Type ignored below because linal.apply_transform (rightly) specifies
    # Tuple[float, float, float] and the implied type from
    # target_position.items() is (rightly) Tuple[float, ...] with unbounded
    # size; unfortunately, mypy can't quite figure out the length check
    # above that makes this OK
    transformed = linal.apply_transform(  # type: ignore
        self.config.gantry_calibration, to_transform)
    # Since target_position is an OrderedDict with the axes ordered by
    # (x, y, z, a, b, c), and we'll only have one of a or z (as checked
    # by the len(to_transform) check above) we can use an enumerate to
    # fuse the specified axes and the transformed values back together.
    # While we do this iteration, we'll also check axis bounds.
    bounds = self._backend.axis_bounds
    for idx, ax in enumerate(target_position.keys()):
        if ax in Axis.gantry_axes():
            smoothie_pos[ax.name] = transformed[idx]
            if smoothie_pos[ax.name] < bounds[ax.name][0] or smoothie_pos[ax.name] > bounds[ax.name][1]:
                # Out-of-bounds moves are only logged, not rejected.
                deck_mins = self._deck_from_smoothie({ax: bound[0] for ax, bound in bounds.items()})
                deck_max = self._deck_from_smoothie({ax: bound[1] for ax, bound in bounds.items()})
                self._log.warning("Out of bounds move: {}={} (transformed: {}) not in" "limits ({}, {}) (transformed: ({}, {})".format(ax.name, target_position[ax], smoothie_pos[ax.name], deck_mins[ax], deck_max[ax], bounds[ax.name][0], bounds[ax.name][1]))
    async with self._motion_lock:
        try:
            self._backend.move(smoothie_pos, speed=speed, home_flagged_axes=home_flagged_axes)
        except Exception:
            self._log.exception('Move failed')
            # Position is now unknown; clear the cache so callers re-home.
            self._current_position.clear()
            raise
        else:
            self._current_position.update(target_position)
def update_campaign_destroy(self, campaign_id, **kwargs):  # noqa: E501
    """Delete a campaign.  # noqa: E501

    Delete an update campaign.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.update_campaign_destroy(campaign_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str campaign_id: The ID of the update campaign (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous call: hand back the request thread directly.
        return self.update_campaign_destroy_with_http_info(campaign_id, **kwargs)  # noqa: E501
    (data) = self.update_campaign_destroy_with_http_info(campaign_id, **kwargs)  # noqa: E501
    return data
def broadcast_setting_change(message='Environment'):
    '''Send a WM_SETTINGCHANGE Broadcast to all Windows

    Args:
        message (str):
            A string value representing the portion of the system that has been
            updated and needs to be refreshed. Default is ``Environment``. These
            are some common values:

            - "Environment": to effect a change in the environment variables
            - "intl": to effect a change in locale settings
            - "Policy": to effect a change in Group Policy Settings
            - a leaf node in the registry
            - the name of a section in the ``Win.ini`` file

            See lParam within msdn docs for
            `WM_SETTINGCHANGE <https://msdn.microsoft.com/en-us/library/ms725497%28VS.85%29.aspx>`_
            for more information on Broadcasting Messages.

            See GWL_WNDPROC within msdn docs for
            `SetWindowLong <https://msdn.microsoft.com/en-us/library/windows/desktop/ms633591(v=vs.85).aspx>`_
            for information on how to retrieve those messages.

    .. note::
        This will only affect new processes that aren't launched by services. To
        apply changes to the path or registry to services, the host must be
        restarted. The ``salt-minion``, if running as a service, will not see
        changes to the environment until the system is restarted. Services
        inherit their environment from ``services.exe`` which does not respond
        to messaging events. See
        `MSDN Documentation <https://support.microsoft.com/en-us/help/821761/changes-that-you-make-to-environment-variables-do-not-affect-services>`_
        for more information.

    Returns:
        bool: True when every window handled the broadcast within the
        timeout (SendMessageTimeoutW returned nonzero... here compared to 1).

    CLI Example:

    .. code-block:: python

        import salt.utils.win_functions
        salt.utils.win_functions.broadcast_setting_change('Environment')
    '''
    # Listen for messages sent by this would involve working with the
    # SetWindowLong function. This can be accessed via win32gui or through
    # ctypes. You can find examples on how to do this by searching for
    # `Accessing WGL_WNDPROC` on the internet. Here are some examples of how
    # this might work:
    #
    # # using win32gui
    # import win32con
    # import win32gui
    # old_function = win32gui.SetWindowLong(window_handle, win32con.GWL_WNDPROC, new_function)
    #
    # # using ctypes
    # import ctypes
    # import win32con
    # from ctypes import c_long, c_int
    # user32 = ctypes.WinDLL('user32', use_last_error=True)
    # WndProcType = ctypes.WINFUNCTYPE(c_int, c_long, c_int, c_int)
    # new_function = WndProcType
    # old_function = user32.SetWindowLongW(window_handle, win32con.GWL_WNDPROC, new_function)
    broadcast_message = ctypes.create_unicode_buffer(message)
    user32 = ctypes.WinDLL('user32', use_last_error=True)
    # Broadcast with a 5 second per-window timeout; abort for hung windows.
    result = user32.SendMessageTimeoutW(HWND_BROADCAST, WM_SETTINGCHANGE, 0, broadcast_message, SMTO_ABORTIFHUNG, 5000, 0)
    return result == 1
def Query(cls, index_urn, target_prefix="", limit=100, token=None):
    """Search the index for matches starting with target_prefix.

    Args:
      index_urn: The index to use. Should be a urn that points to the sha256
        namespace.
      target_prefix: The prefix to match against the index.
      limit: Either a tuple of (start, limit) or a maximum number of results to
        return.
      token: A DB token.

    Returns:
      URNs of files which have the same data as this file - as read from the
      index.
    """
    # NOTE(review): `token` is accepted but not forwarded to the data store
    # call - presumably kept for interface compatibility; confirm.
    return data_store.DB.FileHashIndexQuery(index_urn, target_prefix, limit=limit)
def parse(self, rrstr):
    # type: (bytes) -> None
    '''Parse a Rock Ridge Time Stamp record out of a string.

    Parameters:
     rrstr - The string to parse the record out of.
    Returns:
     Nothing.
    '''
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('TF record already initialized!')
    # We assume that the caller has already checked the su_entry_version,
    # so we don't bother.
    # Header layout: 2 signature bytes, then su_len, su_entry_version and
    # the timestamp flags byte.
    (su_len, su_entry_version_unused, self.time_flags,) = struct.unpack_from('=BBB', rrstr[:5], 2)
    if su_len < 5:
        raise pycdlibexception.PyCdlibInvalidISO('Not enough bytes in the TF record')
    # Flag bit 7 selects the long (17-byte) timestamp form; otherwise
    # each timestamp is a 7-byte directory record date.
    tflen = 7
    if self.time_flags & (1 << 7):
        tflen = 17
    offset = 5
    for index, fieldname in enumerate(self.FIELDNAMES):
        # Each set low flag bit means the corresponding timestamp field
        # is present in the record, in FIELDNAMES order.
        if self.time_flags & (1 << index):
            if tflen == 7:
                setattr(self, fieldname, dates.DirectoryRecordDate())
            elif tflen == 17:
                setattr(self, fieldname, dates.VolumeDescriptorDate())
            getattr(self, fieldname).parse(rrstr[offset:offset + tflen])
            offset += tflen
    self._initialized = True
def deprecated_conditional(predicate, removal_version, entity_description, hint_message=None, stacklevel=4):
    """Marks a certain configuration as deprecated.

    The predicate is used to determine if that configuration is deprecated. It
    is a function that will be called; if it returns true, the deprecation
    warning will issue.

    :param () -> bool predicate: A function that returns True if the deprecation warning should be on.
    :param string removal_version: The pants version which will remove the deprecated functionality.
    :param string entity_description: A description of the deprecated entity.
    :param string hint_message: An optional hint pointing to alternatives to the deprecation.
    :param int stacklevel: How far up in the stack do we go to find the calling fn to report
    :raises DeprecationApplicationError if the deprecation is applied improperly.
    """
    validate_deprecation_semver(removal_version, 'removal version')
    if not predicate():
        return
    warn_or_error(removal_version, entity_description, hint_message, stacklevel=stacklevel)
def _format_response ( rows , fields , unique_col_names ) :
"""This function will look at the data column of rows and extract the specified fields . It
will also dedup changes where the specified fields have not changed . The list of rows should
be ordered by the compound primary key which versioning pivots around and be in ascending
version order .
This function will return a list of dictionaries where each dictionary has the following
schema :
' updated _ at ' : timestamp of the change ,
' version ' : version number for the change ,
' data ' : a nested dictionary containing all keys specified in fields and values
corresponding to values in the user table .
Note that some versions may be omitted in the output for the same key if the specified fields
were not changed between versions .
: param rows : a list of dictionaries representing rows from the ArchiveTable .
: param fields : a list of strings of fields to be extracted from the archived row .""" | output = [ ]
old_id = None
for row in rows :
id_ = { k : row [ k ] for k in unique_col_names }
formatted = { k : row [ k ] for k in row if k != 'data' }
if id_ != old_id : # new unique versioned row
data = row [ 'data' ]
formatted [ 'data' ] = { k : data . get ( k ) for k in fields }
output . append ( formatted )
else :
data = row [ 'data' ]
pruned_data = { k : data . get ( k ) for k in fields }
if ( pruned_data != output [ - 1 ] [ 'data' ] or row [ 'deleted' ] != output [ - 1 ] [ 'deleted' ] ) :
formatted [ 'data' ] = pruned_data
output . append ( formatted )
old_id = id_
return output |
def _send_data ( self , data ) :
"""Send data to the ADB server""" | total_sent = 0
while total_sent < len ( data ) : # Send only the bytes that haven ' t been
# sent yet
sent = self . socket . send ( data [ total_sent : ] . encode ( "ascii" ) )
if sent == 0 :
self . close ( )
raise RuntimeError ( "Socket connection dropped, " "send failed" )
total_sent += sent |
def nb_to_q_nums(nb) -> list:
    """Gets question numbers from each cell in the notebook.

    Every cell must carry a non-empty ``metadata.tags`` list; the first
    tag containing 'q' is taken as the cell's question number.
    """
    def extract_q(cell):
        tags = cell.metadata.tags
        assert tags
        return next(t for t in tags if 'q' in t)
    return [extract_q(cell) for cell in nb['cells']]
def _data_received(self, next_bytes):
    """Maintains buffer of bytes received from peer and extracts bgp
    message from this buffer if enough data is received.

    Validates bgp message marker, length, type and data and constructs
    appropriate bgp message instance and calls handler.

    :Parameters:
        - `next_bytes`: next set of bytes received from peer.
    :raises bgp.NotSync: if the message marker is not the default marker.
    :raises bgp.BadLen: if the advertised length is invalid for the type.
    """
    # Append buffer with received bytes.
    self._recv_buff += next_bytes
    # Loop so that several complete messages arriving in one read are all
    # parsed and dispatched.
    while True:
        # If current buffer size is less then minimum bgp message size, we
        # return as we do not have a complete bgp message to work with.
        if len(self._recv_buff) < BGP_MIN_MSG_LEN:
            return
        # Parse message header into elements.
        auth, length, ptype = BgpProtocol.parse_msg_header(self._recv_buff[:BGP_MIN_MSG_LEN])
        # Check if we have valid bgp message marker.
        # We should get default marker since we are not supporting any
        # authentication.
        if (auth != BgpProtocol.MESSAGE_MARKER):
            LOG.error('Invalid message marker received: %s', auth)
            raise bgp.NotSync()
        # Check if we have valid bgp message length.
        check = (length < BGP_MIN_MSG_LEN or length > BGP_MAX_MSG_LEN)
        # RFC says: The minimum length of the OPEN message is 29
        # octets (including the message header).
        check2 = (ptype == BGP_MSG_OPEN and length < BGPOpen._MIN_LEN)
        # RFC says: A KEEPALIVE message consists of only the
        # message header and has a length of 19 octets.
        check3 = (ptype == BGP_MSG_KEEPALIVE and length != BGPKeepAlive._MIN_LEN)
        # RFC says: The minimum length of the UPDATE message is 23
        # octets.
        check4 = (ptype == BGP_MSG_UPDATE and length < BGPUpdate._MIN_LEN)
        if any((check, check2, check3, check4)):
            raise bgp.BadLen(ptype, length)
        # If we have partial message we wait for rest of the message.
        if len(self._recv_buff) < length:
            return
        # Consume exactly one message; keep the remainder for next pass.
        msg, _, rest = BGPMessage.parser(self._recv_buff)
        self._recv_buff = rest
        # If we have a valid bgp message we call message handler.
        self._handle_msg(msg)
def _api_path ( self , item ) :
"""Get the API path for the current cursor position .""" | if self . base_url is None :
raise NotImplementedError ( "base_url not set" )
path = "/" . join ( [ x . blob [ "id" ] for x in item . path ] )
return "/" . join ( [ self . base_url , path ] ) |
def validate_kernel_string(self, kernel):
    """Determine if a kernel string is valid, meaning it is in the format
    of {username}/{kernel-slug}.

    Parameters
    ----------
    kernel : the kernel name to validate

    Raises
    ------
    ValueError : when the string is malformed or the slug is too short.
    """
    if not kernel:
        # Nothing to validate; a falsy kernel is accepted as-is.
        return
    if '/' not in kernel:
        raise ValueError('Kernel must be specified in the form of ' '\'{username}/{kernel-slug}\'')
    parts = kernel.split('/')
    if not parts[0] or not parts[1]:
        raise ValueError('Kernel must be specified in the form of ' '\'{username}/{kernel-slug}\'')
    if len(parts[1]) < 5:
        raise ValueError('Kernel slug must be at least five characters')
def OnKey(self, event):
    """Handles non-standard shortcut events.

    Builds a dispatch table keyed by (modifier bitmask, keycode); any key
    combination not in the table is passed on via ``event.Skip()``.
    """
    def switch_to_next_table():
        # Ask the grid to display the following table.
        newtable = self.grid.current_table + 1
        post_command_event(self.grid, self.GridActionTableSwitchMsg, newtable=newtable)

    def switch_to_previous_table():
        # Ask the grid to display the preceding table.
        newtable = self.grid.current_table - 1
        post_command_event(self.grid, self.GridActionTableSwitchMsg, newtable=newtable)

    grid = self.grid
    actions = grid.actions
    # Modifier bit flags; combined with | below.
    shift, alt, ctrl = 1, 1 << 1, 1 << 2
    # Shortcuts key tuple: (modifier, keycode)
    # Modifier may be e.g. shift | ctrl
    shortcuts = {
        # <Esc> pressed
        (0, 27): lambda: setattr(actions, "need_abort", True),
        # <Del> pressed
        (0, 127): actions.delete,
        # <Home> pressed
        (0, 313): lambda: actions.set_cursor((grid.GetGridCursorRow(), 0)),
        # <Ctrl> + R pressed
        (ctrl, 82): actions.copy_selection_access_string,
        # <Ctrl> + + pressed
        (ctrl, 388): actions.zoom_in,
        # <Ctrl> + - pressed
        (ctrl, 390): actions.zoom_out,
        # <Shift> + <Space> pressed
        (shift, 32): lambda: grid.SelectRow(grid.GetGridCursorRow()),
        # <Ctrl> + <Space> pressed
        (ctrl, 32): lambda: grid.SelectCol(grid.GetGridCursorCol()),
        # <Shift> + <Ctrl> + <Space> pressed
        (shift | ctrl, 32): grid.SelectAll,
    }
    # Extra table-navigation shortcuts only while in full screen mode.
    if self.main_window.IsFullScreen():
        # <Arrow up> pressed
        shortcuts[(0, 315)] = switch_to_previous_table
        # <Arrow down> pressed
        shortcuts[(0, 317)] = switch_to_next_table
        # <Space> pressed
        shortcuts[(0, 32)] = switch_to_next_table
    keycode = event.GetKeyCode()
    modifier = shift * event.ShiftDown() | alt * event.AltDown() | ctrl * event.ControlDown()
    if (modifier, keycode) in shortcuts:
        shortcuts[(modifier, keycode)]()
    else:
        # Not one of ours - let the default handler process the event.
        event.Skip()
def _prepare_record(self, group):
    """compute record dtype and parents dict fro this group

    Parameters
    ----------
    group : dict
        MDF group dict

    Returns
    -------
    parents, dtypes : dict, numpy.dtype
        mapping of channels to records fields, records fields dtype
    """
    parents, dtypes = group.parents, group.types
    # Sentinel meaning "channel has no parent field in the record".
    no_parent = None, None
    if parents is None:
        # First call for this group: build the mapping and cache it on
        # the group at the end.
        channel_group = group.channel_group
        channels = group.channels
        bus_event = channel_group.flags & v4c.FLAG_CG_BUS_EVENT
        record_size = channel_group.samples_byte_nr
        invalidation_bytes_nr = channel_group.invalidation_bytes_nr
        next_byte_aligned_position = 0
        types = []
        current_parent = ""
        parent_start_offset = 0
        parents = {}
        group_channels = UniqueDB()
        neg_index = -1
        # Walk the channels in record-offset order.
        sortedchannels = sorted(enumerate(channels), key=lambda i: i[1])
        for original_index, new_ch in sortedchannels:
            start_offset = new_ch.byte_offset
            bit_offset = new_ch.bit_offset
            data_type = new_ch.data_type
            bit_count = new_ch.bit_count
            ch_type = new_ch.channel_type
            dependency_list = group.channel_dependencies[original_index]
            name = new_ch.name
            # handle multiple occurance of same channel name
            name = group_channels.get_unique_name(name)
            if start_offset >= next_byte_aligned_position:
                if ch_type not in v4c.VIRTUAL_TYPES:
                    if not dependency_list:
                        parent_start_offset = start_offset
                        # check if there are byte gaps in the record
                        gap = parent_start_offset - next_byte_aligned_position
                        if gap:
                            types.append(("", f"V{gap}"))
                        # adjust size to 1, 2, 4 or 8 bytes
                        size = bit_offset + bit_count
                        if data_type not in v4c.NON_SCALAR_TYPES:
                            if size > 32:
                                size = 8
                            elif size > 16:
                                size = 4
                            elif size > 8:
                                size = 2
                            else:
                                size = 1
                        else:
                            size = size // 8
                        next_byte_aligned_position = parent_start_offset + size
                        bit_count = size * 8
                        if next_byte_aligned_position <= record_size:
                            if not new_ch.dtype_fmt:
                                new_ch.dtype_fmt = get_fmt_v4(data_type, bit_count, ch_type)
                            dtype_pair = (name, new_ch.dtype_fmt)
                            types.append(dtype_pair)
                            parents[original_index] = name, bit_offset
                        else:
                            # Field would overflow the record; roll back.
                            next_byte_aligned_position = parent_start_offset
                        current_parent = name
                    else:
                        if isinstance(dependency_list[0], ChannelArrayBlock):
                            ca_block = dependency_list[0]
                            # check if there are byte gaps in the record
                            gap = start_offset - next_byte_aligned_position
                            if gap:
                                dtype_pair = "", f"V{gap}"
                                types.append(dtype_pair)
                            size = max(bit_count // 8, 1)
                            shape = tuple(ca_block[f"dim_size_{i}"] for i in range(ca_block.dims))
                            if (ca_block.byte_offset_base // size > 1 and len(shape) == 1):
                                shape += (ca_block.byte_offset_base // size, )
                            dim = 1
                            for d in shape:
                                dim *= d
                            if not new_ch.dtype_fmt:
                                new_ch.dtype_fmt = get_fmt_v4(data_type, bit_count)
                            dtype_pair = (name, new_ch.dtype_fmt, shape)
                            types.append(dtype_pair)
                            current_parent = name
                            next_byte_aligned_position = start_offset + size * dim
                            parents[original_index] = name, 0
                        else:
                            parents[original_index] = no_parent
                            if bus_event:
                                # Map bus logging channels with negative
                                # indices onto the CAN data bytes field.
                                for logging_channel in group.logging_channels:
                                    parents[neg_index] = ("CAN_DataFrame.DataBytes", logging_channel.bit_offset, )
                                    neg_index -= 1
                # virtual channels do not have bytes in the record
                else:
                    parents[original_index] = no_parent
            else:
                # Channel overlaps a previously emitted parent field; it
                # becomes a bit-level child of that parent if it fits.
                max_overlapping_size = (next_byte_aligned_position - start_offset) * 8
                needed_size = bit_offset + bit_count
                if max_overlapping_size >= needed_size:
                    parents[original_index] = (current_parent, ((start_offset - parent_start_offset) * 8) + bit_offset, )
            if next_byte_aligned_position > record_size:
                break
        # Pad out any trailing unused bytes of the record.
        gap = record_size - next_byte_aligned_position
        if gap > 0:
            dtype_pair = "", f"V{gap}"
            types.append(dtype_pair)
        dtype_pair = "invalidation_bytes", "<u1", (invalidation_bytes_nr, )
        types.append(dtype_pair)
        dtypes = dtype(types)
        # Cache for subsequent calls.
        group.parents, group.types = parents, dtypes
    return parents, dtypes
def prune_chunks(self, tsn):
    """Prune chunks up to the given TSN.

    Removes every leading reassembly chunk whose TSN is covered by *tsn*
    and returns the total size of the user data that was discarded.
    """
    last_pruned = -1
    freed = 0
    for index, chunk in enumerate(self.reassembly):
        if not uint32_gte(tsn, chunk.tsn):
            break
        last_pruned = index
        freed += len(chunk.user_data)
    self.reassembly = self.reassembly[last_pruned + 1:]
    return freed
def get_block_containing_tx(self, txid):
    """Retrieve the list of blocks (block ids) containing a
    transaction with transaction id `txid`.

    Args:
        txid (str): transaction id of the transaction to query

    Returns:
        Block id list (list(int))
    """
    matching = list(backend.query.get_block_with_transaction(self.connection, txid))
    if len(matching) > 1:
        # A transaction should live in at most one block; log loudly.
        logger.critical('Transaction id %s exists in multiple blocks', txid)
    return [block['height'] for block in matching]
def is_fully_verified(self):
    """Determine if this Job is fully verified based on the state of its Errors.

    An Error (TextLogError or FailureLine) is considered Verified once its
    related TextLogErrorMetadata has best_is_verified set to True. A Job
    is then considered Verified once all its Errors TextLogErrorMetadata
    instances are set to True.

    :returns: bool - True when no unverified TextLogErrors remain.
    """
    # Count (rather than fetch) the errors still awaiting verification.
    unverified_errors = TextLogError.objects.filter(_metadata__best_is_verified=False, step__job=self).count()
    if unverified_errors:
        logger.error("Job %r has unverified TextLogErrors", self)
        return False
    logger.info("Job %r is fully verified", self)
    return True
def get_observer_look(sat_lon, sat_lat, sat_alt, utc_time, lon, lat, alt):
    """Calculate observers look angle to a satellite.
    http://celestrak.com/columns/v02n02/

    utc_time: Observation time (datetime object)
    lon: Longitude of observer position on ground in degrees east
    lat: Latitude of observer position on ground in degrees north
    alt: Altitude above sea-level (geoid) of observer position on ground in km

    Return: (Azimuth, Elevation) in degrees
    """
    # Positions of satellite and observer; the velocities are unused here.
    (pos_x, pos_y, pos_z), (vel_x, vel_y, vel_z) = astronomy.observer_position(utc_time, sat_lon, sat_lat, sat_alt)
    (opos_x, opos_y, opos_z), (ovel_x, ovel_y, ovel_z) = astronomy.observer_position(utc_time, lon, lat, alt)
    lon = np.deg2rad(lon)
    lat = np.deg2rad(lat)
    # Local sidereal time of the observer.
    theta = (astronomy.gmst(utc_time) + lon) % (2 * np.pi)
    # Range vector from observer to satellite.
    rx = pos_x - opos_x
    ry = pos_y - opos_y
    rz = pos_z - opos_z
    sin_lat = np.sin(lat)
    cos_lat = np.cos(lat)
    sin_theta = np.sin(theta)
    cos_theta = np.cos(theta)
    # Rotate the range vector into the topocentric frame
    # (south, east, zenith components).
    top_s = sin_lat * cos_theta * rx + sin_lat * sin_theta * ry - cos_lat * rz
    top_e = -sin_theta * rx + cos_theta * ry
    top_z = cos_lat * cos_theta * rx + cos_lat * sin_theta * ry + sin_lat * rz
    az_ = np.arctan(-top_e / top_s)
    # Normalize azimuth to [0, 2*pi).  xarray/dask containers need the
    # branchless da.where path since lazy arrays cannot be mutated in
    # place; plain numpy arrays are adjusted with fancy indexing.
    if has_xarray and isinstance(az_, xr.DataArray):
        az_data = az_.data
    else:
        az_data = az_
    if has_dask and isinstance(az_data, da.Array):
        az_data = da.where(top_s > 0, az_data + np.pi, az_data)
        az_data = da.where(az_data < 0, az_data + 2 * np.pi, az_data)
    else:
        az_data[np.where(top_s > 0)] += np.pi
        az_data[np.where(az_data < 0)] += 2 * np.pi
    if has_xarray and isinstance(az_, xr.DataArray):
        az_.data = az_data
    else:
        az_ = az_data
    # Slant range and elevation above the local horizon.
    rg_ = np.sqrt(rx * rx + ry * ry + rz * rz)
    el_ = np.arcsin(top_z / rg_)
    return np.rad2deg(az_), np.rad2deg(el_)
def unmonitor(self, target):
    """Stop monitoring the online status of a user.

    Returns whether or not the server supports monitoring.
    """
    # Only act when the server advertises the capability and the target
    # is actually being monitored.
    if 'monitor-notify' in self._capabilities and self.is_monitoring(target):
        yield from self.rawmsg('MONITOR', '-', target)
        self._monitoring.remove(target)
        return True
    else:
        return False
def fitlin(imgarr, refarr):
    """Compute the least-squares fit between two arrays.

    A Python translation of 'FITLIN' from 'drutil.f' (Drizzle V2.9).

    Parameters
    ----------
    imgarr : sequence of (x, y) pairs
        Input positions; the first point serves as the pivot origin.
    refarr : sequence of (x, y) pairs
        Reference positions corresponding to ``imgarr``.

    Returns
    -------
    list, list
        ``[a, b, xt]`` and ``[c, d, yt]`` -- the linear-fit coefficients
        for the x axis and y axis respectively.
    """
    img = np.asarray(imgarr, dtype=np.float64)
    ref = np.asarray(refarr, dtype=np.float64)

    # Work in coordinates relative to the first point of each array.
    dx = img[:, 0] - img[0, 0]
    dy = img[:, 1] - img[0, 1]
    rx = ref[:, 0] - ref[0, 0]
    ry = ref[:, 1] - ref[0, 1]
    npos = len(img)

    # Normal-equation matrix of the least-squares problem (symmetric).
    mat = np.array([
        [np.sum(dx * dx), np.sum(dx * dy), np.sum(dx)],
        [np.sum(dx * dy), np.sum(dy * dy), np.sum(dy)],
        [np.sum(dx), np.sum(dy), npos],
    ], dtype=np.float64)
    inv = linalg.inv(mat)

    # Right-hand-side vectors for the x and y axes.
    vx = np.array([np.sum(rx * dx), np.sum(rx * dy), np.sum(rx)])
    vy = np.array([np.sum(ry * dx), np.sum(ry * dy), np.sum(ry)])

    # Same sign conventions as the Fortran original: b carries a -1 factor.
    a = np.dot(inv[0], vx)
    b = -np.dot(inv[1], vx)
    c = np.dot(inv[1], vy)
    d = np.dot(inv[0], vy)

    # Translate the fit back to absolute coordinates.
    xt = ref[0, 0] - a * img[0, 0] + b * img[0, 1]
    yt = ref[0, 1] - d * img[0, 0] - c * img[0, 1]
    return [a, b, xt], [c, d, yt]
def calc_normal_std_he_backward(inmaps, outmaps, kernel=(1, 1)):
    r"""Calculates the standard deviation of He et al. (backward case).

    .. math::

        \sigma = \sqrt{\frac{2}{MK}}

    Args:
        inmaps (int): Map size of an input Variable, :math:`N`.
        outmaps (int): Map size of an output Variable, :math:`M`.
        kernel (:obj:`tuple` of :obj:`int`): Convolution kernel spatial shape.
            In the above definition, :math:`K` is the product of shape
            dimensions. In Affine, the default value should be used.

    References:
        * `He, et al. Delving Deep into Rectifiers: Surpassing Human-Level
          Performance on ImageNet Classification.
          <https://arxiv.org/abs/1502.01852>`_
    """
    # Only the output side (M) and kernel size (K) enter the backward-mode
    # formula; ``inmaps`` is unused but kept for interface symmetry with
    # the forward variant.
    fan_out = np.prod(kernel) * outmaps
    return np.sqrt(2.0 / fan_out)
def load_styles(path_or_doc):
    """Return a dictionary of all styles contained in an ODF document."""
    if isinstance(path_or_doc, string_types):
        # A path string was given: parse the document from disk.
        doc = load(path_or_doc)
    elif isinstance(path_or_doc, ODFDocument):
        # Recover the wrapped OpenDocumentText instance.
        doc = path_or_doc._doc
    else:
        doc = path_or_doc
    assert isinstance(doc, OpenDocument), doc
    return {_style_name(style): style for style in doc.styles.childNodes}
def auto_invalidate(self):
    """Invalidate the cache if the current time is past the time to live."""
    expires_at = self._invalidated + timedelta(seconds=self._timetolive)
    if datetime.now() > expires_at:
        self.invalidate()
def document(self):
    """:return: the :class:`Document` node that contains this node,
    or ``self`` if this node is the document."""
    if not self.is_document:
        return self.adapter.wrap_document(self.adapter.impl_document)
    return self
def get_assessment_taken_id(self):
    """Gets the ``Id`` of the ``AssessmentTaken``.

    return: (osid.id.Id) - the assessment taken ``Id``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.Activity.get_objective_id
    taken_id = self._my_map['assessmentTakenId']
    if not bool(taken_id):
        raise errors.IllegalState('assessment_taken empty')
    return Id(taken_id)
def make_sudo_cmd(sudo_user, executable, cmd):
    """helper function for connection plugins to create sudo commands"""
    # Rather than detect if sudo wants a password this time, -k makes
    # sudo always ask for a password if one is required. Passing a quoted
    # compound command to sudo (or sudo -s) directly doesn't work, so we
    # shellquote it with pipes.quote() and pass the quoted string to the
    # user's shell. The caller loops reading output until it sees the
    # randomly-generated sudo prompt set with the -p option.
    key_chars = [chr(random.randint(ord('a'), ord('z'))) for _ in xrange(32)]
    prompt = '[sudo via ansible, key=%s] password: ' % ''.join(key_chars)
    sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % (
        C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS,
        prompt, sudo_user, executable or '$SHELL', pipes.quote(cmd))
    return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt)
def check_usage(docstring, argv=None, usageifnoargs=False):
    """Check if the program has been run with a --help argument; if so,
    print usage information and exit.

    :arg str docstring: the program help text
    :arg argv: the program arguments; taken as :data:`sys.argv` if given as
        :const:`None` (the default). (Note that this implies ``argv[0]``
        should be the program name and not the first option.)
    :arg bool usageifnoargs: if :const:`True`, usage information will be
        printed and the program will exit if no command-line arguments are
        passed. If "long", print long usage. Default is :const:`False`.

    Intended for small command-line programs: put the help text in the
    module docstring and call ``check_usage(__doc__)`` at startup.
    See also :func:`show_usage` and :func:`wrong_usage`.
    """
    if argv is None:
        from sys import argv
    argc = len(argv)
    # No arguments at all: optionally show (short or long) usage and exit.
    if argc == 1 and usageifnoargs:
        show_usage(docstring, (usageifnoargs != 'long'), None, 0)
    # Explicit help request: always show long usage and exit.
    if argc == 2 and argv[1] in ('-h', '--help'):
        show_usage(docstring, False, None, 0)
def yellow(cls, string, auto=False):
    """Color-code entire string.

    :param str string: String to colorize.
    :param bool auto: Enable auto-color (dark/light terminal).
    :return: Class instance for colorized string.
    :rtype: Color
    """
    # Thin wrapper: delegates to the generic colorize() with a fixed color.
    return cls.colorize('yellow', string, auto=auto)
def check_for_rerun_user_task(self):
    """Checks that the user task needs to re-run.

    If necessary, current task and pre task's states are changed and re-run.
    If wf_meta not in data (there is no user interaction from pre-task) and
    the last completed task type is a user task and the current step is not
    an EndEvent and there is no lane change, this user task is rerun.
    """
    data = self.current.input
    # Presence of 'wf_meta' means the user already interacted with the
    # pre-task, so no re-run is needed.
    if 'wf_meta' in data:
        return
    # The first READY task is taken as the current step; its parent is the
    # task that just completed.
    current_task = self.workflow.get_tasks(Task.READY)[0]
    current_task_type = current_task.task_spec.__class__.__name__
    pre_task = current_task.parent
    pre_task_type = pre_task.task_spec.__class__.__name__
    # Only a just-completed UserTask is eligible for re-running.
    if pre_task_type != 'UserTask':
        return
    # At the end of the workflow there is nothing to re-run.
    if current_task_type == 'EndEvent':
        return
    pre_lane = pre_task.task_spec.lane
    current_lane = current_task.task_spec.lane
    # Re-run only when control stays in the same lane: restore the user
    # task to READY and park the current task as MAYBE.
    if pre_lane == current_lane:
        pre_task._set_state(Task.READY)
        current_task._set_state(Task.MAYBE)
def OpenFileObject(cls, path_spec_object, resolver_context=None):
    """Opens a file-like object defined by path specification.

    Args:
      path_spec_object (PathSpec): path specification.
      resolver_context (Optional[Context]): resolver context, where None
          represents the built in context which is not multi process safe.

    Returns:
      FileIO: file-like object or None if the path specification could not
          be resolved.

    Raises:
      PathSpecError: if the path specification is incorrect.
      TypeError: if the path specification type is unsupported.
    """
    if not isinstance(path_spec_object, path_spec.PathSpec):
        raise TypeError('Unsupported path specification type.')
    if resolver_context is None:
        # Fall back to the class-wide context (not multi-process safe).
        resolver_context = cls._resolver_context
    if path_spec_object.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
        # Mount path specifications are aliases: translate the mount point
        # identifier into the path specification registered under it.
        if path_spec_object.HasParent():
            raise errors.PathSpecError(
                'Unsupported mount path specification with parent.')
        mount_point = getattr(path_spec_object, 'identifier', None)
        if not mount_point:
            raise errors.PathSpecError(
                'Unsupported path specification without mount point identifier.')
        path_spec_object = mount_manager.MountPointManager.GetMountPoint(mount_point)
        if not path_spec_object:
            raise errors.MountPointError(
                'No such mount point: {0:s}'.format(mount_point))
    # Reuse a cached file object from the resolver context when available;
    # otherwise create one via the type-specific helper and open it.
    file_object = resolver_context.GetFileObject(path_spec_object)
    if not file_object:
        resolver_helper = cls._GetResolverHelper(path_spec_object.type_indicator)
        file_object = resolver_helper.NewFileObject(resolver_context)
        file_object.open(path_spec=path_spec_object)
    return file_object
def end_headers(self):
    """Ends the headers part"""
    # Flush every accumulated header to the underlying handler in one pass.
    for header_name, header_value in self._headers.items():
        self._handler.send_header(header_name, header_value)
    self._handler.end_headers()
def Convert(self, metadata, checkresult, token=None):
    """Converts a single CheckResult.

    Args:
      metadata: ExportedMetadata to be used for conversion.
      checkresult: CheckResult to be converted.
      token: Security token.

    Yields:
      Resulting ExportedCheckResult. Empty list is a valid result and means
      that conversion wasn't possible.
    """
    if checkresult.HasField("anomaly"):
        # One exported result is emitted per anomaly attached to the check.
        for anomaly in checkresult.anomaly:
            exported_anomaly = ExportedAnomaly(
                type=anomaly.type, severity=anomaly.severity,
                confidence=anomaly.confidence)
            # Optional scalar fields are copied only when present/truthy.
            if anomaly.symptom:
                exported_anomaly.symptom = anomaly.symptom
            if anomaly.explanation:
                exported_anomaly.explanation = anomaly.explanation
            if anomaly.generated_by:
                exported_anomaly.generated_by = anomaly.generated_by
            # Repeated fields are flattened into newline-separated strings.
            if anomaly.anomaly_reference_id:
                exported_anomaly.anomaly_reference_id = "\n".join(anomaly.anomaly_reference_id)
            if anomaly.finding:
                exported_anomaly.finding = "\n".join(anomaly.finding)
            yield ExportedCheckResult(
                metadata=metadata, check_id=checkresult.check_id,
                anomaly=exported_anomaly)
    else:
        # No anomalies: still export the bare check result.
        yield ExportedCheckResult(metadata=metadata, check_id=checkresult.check_id)
def parse_xml(self, node):
    """Parse a Tileset from ElementTree xml element

    A bit of mangling is done here so that tilesets that have external
    TSX files appear the same as those that don't.

    :param node: ElementTree element
    :return: self
    """
    import os
    # if true, then node references an external tileset
    source = node.get('source', None)
    if source:
        if source[-4:].lower() == ".tsx":
            # external tilesets don't save this, store it for later
            self.firstgid = int(node.get('firstgid'))
            # we need to mangle the path - tiled stores relative paths
            dirname = os.path.dirname(self.parent.filename)
            path = os.path.abspath(os.path.join(dirname, source))
            try:
                # Replace the <tileset> node with the root of the TSX file
                # so the rest of the parse is identical for both cases.
                node = ElementTree.parse(path).getroot()
            except IOError:
                msg = "Cannot load external tileset: {0}"
                logger.error(msg.format(path))
                raise Exception
        else:
            # NOTE(review): this logs self.source, not the offending
            # ``source`` variable -- confirm which was intended.
            msg = "Found external tileset, but cannot handle type: {0}"
            logger.error(msg.format(self.source))
            raise Exception
    self._set_properties(node)
    # since tile objects [probably] don't have a lot of metadata,
    # we store it separately in the parent (a TiledMap instance)
    register_gid = self.parent.register_gid
    for child in node.getiterator('tile'):
        tiled_gid = int(child.get("id"))
        # ``types`` is presumably a module-level map of attribute-name to
        # cast function -- TODO confirm against module scope.
        p = {k: types[k](v) for k, v in child.items()}
        p.update(parse_properties(child))
        # images are listed as relative to the .tsx file, not the .tmx file:
        if source and "path" in p:
            p["path"] = os.path.join(os.path.dirname(source), p["path"])
        # handle tiles that have their own image
        image = child.find('image')
        if image is None:
            p['width'] = self.tilewidth
            p['height'] = self.tileheight
        else:
            tile_source = image.get('source')
            # images are listed as relative to the .tsx file, not the .tmx file:
            if tile_source:
                tile_source = os.path.join(os.path.dirname(source), tile_source)
            p['source'] = tile_source
            p['trans'] = image.get('trans', None)
            p['width'] = image.get('width')
            p['height'] = image.get('height')
        # handle tiles with animations
        anim = child.find('animation')
        frames = list()
        p['frames'] = frames
        if anim is not None:
            for frame in anim.findall("frame"):
                duration = int(frame.get('duration'))
                gid = register_gid(int(frame.get('tileid')) + self.firstgid)
                frames.append(AnimationFrame(gid, duration))
        # Attach the collected metadata to every registered gid variant
        # (flipped/rotated) of this tile.
        for gid, flags in self.parent.map_gid2(tiled_gid + self.firstgid):
            self.parent.set_tile_properties(gid, p)
    # handle the optional 'tileoffset' node
    self.offset = node.find('tileoffset')
    if self.offset is None:
        self.offset = (0, 0)
    else:
        self.offset = (self.offset.get('x', 0), self.offset.get('y', 0))
    image_node = node.find('image')
    if image_node is not None:
        self.source = image_node.get('source')
        # When loading from tsx, tileset image path is relative to the tsx
        # file, not the tmx:
        if source:
            self.source = os.path.join(os.path.dirname(source), self.source)
        self.trans = image_node.get('trans', None)
        self.width = int(image_node.get('width'))
        self.height = int(image_node.get('height'))
    return self
def factory(description="", codes=[200], response_example=None, response_ctor=None, ):
    """desc: Describes a response to an API call
    args:
      - name: description
        type: str
        desc: A description of the condition that causes this response
        required: false
        default: ""
      - name: codes
        type: int
        desc: >
          One or more HTTP status codes associated with this response
        required: false
        default: [200]
      - name: response_example
        type: dict
        desc: An example JSON response body
        required: false
        default: null
      - name: response_help
        type: DocString
        desc: Help for @response_example
        required: false
        default: null
    """
    # NOTE(review): the mutable default ``codes=[200]`` is shared across
    # calls; it is never mutated here, but worth confirming downstream.
    doc = DocString.from_ctor(response_ctor) if response_ctor else None
    return RouteMethodResponse(description, codes, response_example, doc)
def mjd_to_ut_datetime(self, mjd, sqlDate=False, datetimeObject=False):
    """*mjd to ut datetime*

    Precision should be respected: the number of decimal places in the
    input MJD decides how much of the timestamp is rendered.

    **Key Arguments:**
        - ``mjd`` -- time in MJD.
        - ``sqlDate`` -- add a 'T' between date and time instead of space
        - ``datetimeObject`` -- return a datetime object instead of a
          string. Default *False*

    **Return:**
        - ``utDatetime`` -- the UT datetime in string format (or a
          ``datetime`` object when ``datetimeObject`` is True)

    **Usage:**

    .. code-block:: python

        utDate = converter.mjd_to_ut_datetime(mjd=57504.61577585013)
        # OUT: 2016-04-26 14:46:43.033
        utDate = converter.mjd_to_ut_datetime(mjd=57504.61577585013, sqlDate=True)
        # OUT: 2016-04-26T14:46:43.033
    """
    self.log.info('starting the ``mjd_to_ut_datetime`` method')
    from datetime import datetime
    # CONVERT TO UNIXTIME (MJD -> JD -> unix epoch seconds)
    unixtime = (float(mjd) + 2400000.5 - 2440587.5) * 86400.0
    theDate = datetime.utcfromtimestamp(unixtime)
    if datetimeObject == False:
        # DETERMINE PRECISION from the decimal places of the input value.
        strmjd = repr(mjd)
        if "." not in strmjd:
            precisionUnit = "day"
            precision = 0
            utDatetime = theDate.strftime("%Y-%m-%d")
        else:
            lenDec = len(strmjd.split(".")[-1])
            if lenDec < 2:
                precisionUnit = "day"
                precision = 0
                utDatetime = theDate.strftime("%Y-%m-%d")
            elif lenDec < 3:
                # NOTE(review): labelled "hour" precision but formatted
                # date-only -- confirm whether "%Y-%m-%d %H" was intended.
                precisionUnit = "hour"
                precision = 0
                utDatetime = theDate.strftime("%Y-%m-%d")
            elif lenDec < 5:
                precisionUnit = "minute"
                precision = 0
                utDatetime = theDate.strftime("%Y-%m-%d %H:%M")
            else:
                # Seconds with up to 3 fractional digits, depending on how
                # many decimals the MJD carried.
                precisionUnit = "second"
                precision = lenDec - 5
                if precision > 3:
                    precision = 3
                secs = float(theDate.strftime("%S.%f"))
                secs = "%02.*f" % (precision, secs)
                utDatetime = theDate.strftime("%Y-%m-%d %H:%M:") + secs
        if sqlDate:
            utDatetime = utDatetime.replace(" ", "T")
    else:
        utDatetime = theDate
    self.log.info('completed the ``mjd_to_ut_datetime`` method')
    return utDatetime
def cancel_all(self, product_id=None):
    """With best effort, cancel all open orders.

    Args:
        product_id (Optional[str]): Only cancel orders for this product_id

    Returns:
        list: A list of ids of the canceled orders.
    """
    # Only send the filter parameter when a product was requested.
    params = {'product_id': product_id} if product_id is not None else None
    return self._send_message('delete', '/orders', params=params)
def collate(data: Iterable, reverse: bool = False) -> List[str]:
    """Sort a list of strings alphabetically according to Thai rules.

    :param list data: a list of strings to be sorted
    :param bool reverse: reverse flag, set to get the result in
        descending order
    :return: a list of strings, sorted alphabetically, according to
        Thai rules

    **Example**::

        >> collate(['ไก่', 'เป็ด', 'หมู', 'วัว'])
        ['ไก่', 'เป็ด', 'วัว', 'หมู']
    """
    # _thkey produces a Thai-collation sort key for each string.
    return sorted(data, reverse=reverse, key=_thkey)
def get_realms_by_explosion(self, realms):
    """Get all members of this realm including members of sub-realms
    on multi-levels

    :param realms: realms list, used to look for a specific one
    :type realms: alignak.objects.realm.Realms
    :return: list of members and add realm to realm_members attribute
    :rtype: list
    """
    # If rec_tag is already set, then we detected a loop in the realms hierarchy!
    if getattr(self, 'rec_tag', False):
        self.add_error("Error: there is a loop in the realm definition %s" % self.get_name())
        return None
    # Ok, not in a loop, we tag the realm and parse its members
    self.rec_tag = True
    # Order realm members list by name
    self.realm_members = sorted(self.realm_members)
    for member in self.realm_members:
        realm = realms.find_by_name(member)
        if not realm:
            # Unknown member names are recorded for later error reporting.
            self.add_unknown_members(member)
            continue
        # Recurse into the child realm; None signals a loop further down,
        # in which case our own membership lists are cleared.
        children = realm.get_realms_by_explosion(realms)
        if children is None:
            # We got a loop in our children definition
            self.all_sub_members = []
            self.realm_members = []
            return None
    # Return the list of all unique members
    # NOTE(review): all_sub_members is returned but never extended in this
    # method -- presumably populated elsewhere; verify against the class.
    return self.all_sub_members
def raw_content(self, output=None, str_output=None):
    """Searches for `output` regex match within content of page, regardless of mimetype."""
    def match_decoded(regex, content):
        # The raw bytes are decoded before the regex search is applied.
        return regex.search(content.decode())
    return self._search_page(output, str_output, self.response.data, match_decoded)
def do_sing(self, arg):
    """Sing a colorful song."""
    # Fall back to the terminal's reset color when songcolor is unknown.
    self.poutput(arg, color=COLORS.get(self.songcolor, Fore.RESET))
def reconfigure_resolver():
    """Reset the resolver configured for this thread to a fresh instance.

    This essentially re-reads the system-wide resolver configuration.
    If a custom resolver has been set using :func:`set_resolver`, the flag
    indicating that no automatic re-configuration shall take place is
    cleared.
    """
    global _state
    # Clearing the override flag re-enables automatic reconfiguration.
    _state.overridden_resolver = False
    _state.resolver = dns.resolver.Resolver()
def _handle_backend_error(self, exception, idp):
    """See super class satosa.frontends.base.FrontendModule

    :type exception: satosa.exception.SATOSAAuthenticationError
    :type idp: saml.server.Server
    :rtype: satosa.response.Response
    :param exception: The SATOSAAuthenticationError
    :param idp: The saml frontend idp server
    :return: A response
    """
    # Restore the request context (relay state, response arguments) that was
    # stashed in the state before the backend was invoked.
    loaded_state = self.load_state(exception.state)
    relay_state = loaded_state["relay_state"]
    resp_args = loaded_state["resp_args"]
    # Build a SAML error response addressed to the original requester.
    error_resp = idp.create_error_response(resp_args["in_response_to"], resp_args["destination"], Exception(exception.message))
    http_args = idp.apply_binding(resp_args["binding"], str(error_resp), resp_args["destination"], relay_state, response=True)
    satosa_logging(logger, logging.DEBUG, "HTTPargs: %s" % http_args, exception.state)
    return make_saml_response(resp_args["binding"], http_args)
def handle_read(repo, **kwargs):
    """handles reading repo information"""
    log.info('read: %s %s' % (repo, kwargs))
    # A bare name (string) gets a stock banner; a repo object is serialized.
    if type(repo) not in [unicode, str]:
        return repo.serialize()
    return {'name': 'Repo', 'desc': 'Welcome to Grit', 'comment': ''}
def get_games_by_season(self, season):
    """Game schedule for a specified season.

    :param season: season identifier; must be coercible to ``int``.
    :return: the API result for ``Games/{season}``.
    :raises FantasyDataError: if ``season`` cannot be converted to an int.
    """
    try:
        season = int(season)
    except (ValueError, TypeError):
        # int() raises TypeError (not ValueError) for non-string,
        # non-numeric input such as None or a list; both cases are the
        # same documented parameter error.
        raise FantasyDataError('Error: Invalid method parameters')
    return self._method_call("Games/{season}", "stats", season=season)
def add_observer(self, callable_, entity_type=None, action=None, entity_id=None, predicate=None):
    """Register an "on-model-change" callback.

    Once the model is connected, ``callable_`` will be called each time the
    model changes. ``callable_`` should be Awaitable and accept the
    following positional arguments:

        delta - An instance of :class:`juju.delta.EntityDelta` containing
            the raw delta data recv'd from the Juju websocket.
        old_obj - If the delta modifies an existing object in the model,
            old_obj will be a copy of that object, as it was before the
            delta was applied. Will be None if the delta creates a new
            entity in the model.
        new_obj - A copy of the new or updated object, after the delta is
            applied. Will be None if the delta removes an entity from the
            model.
        model - The :class:`Model` itself.

    Events for which ``callable_`` is called can be specified by passing
    entity_type, action, and/or entity_id filter criteria, e.g.::

        add_observer(
            myfunc, entity_type='application', action='add',
            entity_id='ubuntu')

    For more complex filtering conditions, pass a predicate function. It
    will be called with a delta as its only argument. If the predicate
    function returns True, the ``callable_`` will be called.
    """
    # The _Observer wrapper holds the filter criteria; it keys the registry.
    self._observers[_Observer(callable_, entity_type, action, entity_id, predicate)] = callable_
def sync_original_prompt(self, sync_multiplier=1.0):
    '''This attempts to find the prompt. Basically, press enter and record
    the response; press enter again and record the response; if the two
    responses are similar then assume we are at the original prompt.

    This can be a slow function. Worst case with the default
    sync_multiplier can take 12 seconds. Low latency connections are more
    likely to fail with a low sync_multiplier. Best case sync time gets
    worse with a high sync multiplier (500 ms with default).
    '''
    # All of these timing pace values are magic; they were chosen based on
    # what seemed reliable against a heavily loaded machine.
    self.sendline()
    time.sleep(0.1)
    try:
        # Clear the buffer before getting the prompt.
        self.try_read_prompt(sync_multiplier)
    except TIMEOUT:
        pass
    # Take three prompt samples; the first is discarded as warm-up, the
    # last two are compared for similarity.
    self.sendline()
    self.try_read_prompt(sync_multiplier)
    self.sendline()
    first = self.try_read_prompt(sync_multiplier)
    self.sendline()
    second = self.try_read_prompt(sync_multiplier)
    distance = self.levenshtein_distance(first, second)
    if len(first) == 0:
        return False
    # Prompts are "similar enough" when under 40% of characters differ.
    return float(distance) / len(first) < 0.4
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    r'''Returns the rate of events with M > mag_value

    :param float slip: Slip rate in mm/yr
    :param float mmax: Maximum magnitude
    :param float mag_value: Magnitude value
    :param float bbar: \bar{b} parameter (effectively = b * log(10.))
    :param float dbar: \bar{d} parameter
    :param float beta: Beta value of formula defined in Eq. 20 of
        Anderson & Luco (1983)
    '''
    delta_m = mmax - mag_value
    # Slip is converted from mm/yr to cm/yr for the A&L formulation.
    a_2 = self._get_a2_value(bbar, dbar, slip / 10., beta, mmax)
    # The boolean factor zeroes the rate wherever mag_value >= mmax.
    rate = np.exp(bbar * delta_m) - 1.0
    return a_2 * rate * (delta_m > 0.0)
def set_branching_model(self, project, repository, data):
    """Set branching model

    :param project:
    :param repository:
    :param data:
    :return:
    """
    url = ('rest/branch-utils/1.0/projects/{project}/repos/{repository}'
           '/branchmodel/configuration').format(project=project,
                                                repository=repository)
    return self.put(url, data=data)
def httpauth_login_required(func):
    """Put this decorator before your view to check if the user is logged in
    via httpauth and return a JSON 401 error if he/she is not.

    :param func: the view function to wrap.
    :return: the wrapped view, which answers 401 (JSON body) when no valid
        HTTP Basic credentials yield an active user.
    """
    def wrapper(request, *args, **kwargs):
        user = None
        # get the Basic username and password from the request.
        auth_string = request.META.get('HTTP_AUTHORIZATION', None)
        if auth_string:
            (authmeth, auth) = auth_string.split(" ", 1)
            # Only HTTP Basic credentials can be decoded here; previously
            # any scheme (Digest, Bearer, ...) was base64-decoded as Basic,
            # which crashed or mis-authenticated on such headers.
            if authmeth.lower() == 'basic':
                # NOTE(review): str.decode('base64') is Python-2-only;
                # switch to base64.b64decode when porting to Python 3.
                auth = auth.strip().decode('base64')
                (username, password) = auth.split(':', 1)
                user = authenticate(username=username, password=password)
        if not user or not user.is_active:
            return HttpResponse(unauthorized_json_response(),
                                content_type="application/json", status=401)
        login(request, user)
        return func(request, *args, **kwargs)
    return update_wrapper(wrapper, func)
def serve_file(load, fnd):
    '''Return a chunk from a file based on the data received'''
    if 'env' in load:
        # "env" is not supported; use "saltenv".
        load.pop('env')
    ret = {'data': '', 'dest': ''}
    # All three keys are required to locate and read the chunk.
    if any(key not in load for key in ('path', 'loc', 'saltenv')):
        return ret
    if not fnd['path']:
        return ret
    ret['dest'] = fnd['rel']
    gzip = load.get('gzip', None)
    fpath = os.path.normpath(fnd['path'])
    with salt.utils.files.fopen(fpath, 'rb') as fp_:
        # Seek to the requested offset and read one buffer-sized chunk.
        fp_.seek(load['loc'])
        data = fp_.read(__opts__['file_buffer_size'])
        if gzip and data:
            data = salt.utils.gzip_util.compress(data, gzip)
            ret['gzip'] = gzip
        ret['data'] = data
    return ret
def entry_id_from_cobra_encoding(cobra_id):
    """Convert COBRA-encoded ID string to decoded ID string."""
    decoded = cobra_id
    # Replace each known escape sequence with its decoded symbol.
    for escape, symbol in iteritems(_COBRA_DECODE_ESCAPES):
        decoded = decoded.replace(escape, symbol)
    return decoded
def releaseNetToMs():
    """RELEASE Section 9.3.18.1"""
    # Message type 0x2d = 00101101 (RELEASE)
    header = TpPd(pd=0x3) / MessageType(mesType=0x2d)
    cause_one = CauseHdr(ieiC=0x08, eightBitC=0x0)
    cause_two = CauseHdr(ieiC=0x08, eightBitC=0x0)
    facility = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
    user_user = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
    return header / cause_one / cause_two / facility / user_user
def Write(self, *state_args, **state_dict):
    """See `phi.dsl.Expression.Read`"""
    # At least one target is required: positional state keys and/or a
    # single key=expression pair.
    if len(state_dict) + len(state_args) < 1:
        raise Exception("Please include at-least 1 state variable, got {0} and {1}".format(state_args, state_dict))
    if len(state_dict) > 1:
        raise Exception("Please include at-most 1 keyword argument expression, got {0}".format(state_dict))
    if len(state_dict) > 0:
        # key=expr form: pipe self through the expression, then also write
        # the resulting value under ``key`` like a positional target.
        state_key = next(iter(state_dict.keys()))
        write_expr = state_dict[state_key]
        state_args += (state_key,)
        expr = self >> write_expr
    else:
        expr = self
    def g(x, state):
        # Write the current value under every requested state key.
        update = {key: x for key in state_args}
        state = utils.merge(state, update)
        # side effect for convenience
        _StateContextManager.REFS.update(state)
        return x, state
    return expr.__then__(g)
def get_attrtext(value):
    """attrtext = 1*(any non-ATTRIBUTE_ENDS character)

    We allow any non-ATTRIBUTE_ENDS in attrtext, but add defects to the
    token's defects list if we find non-attrtext characters. We also
    register defects for *any* non-printables even though the RFC doesn't
    exclude all of them, because we follow the spirit of RFC 5322.
    """
    match = _non_attribute_end_matcher(value)
    if not match:
        raise errors.HeaderParseError(
            "expected attrtext but found {!r}".format(value))
    text = match.group()
    remainder = value[len(text):]
    token = ValueTerminal(text, 'attrtext')
    # Records defects on the token for any non-xtext characters found.
    _validate_xtext(token)
    return token, remainder
def generate_signing_key(args):
    """Generate an ECDSA signing key for signing secure boot images (post-bootloader)"""
    # Refuse to clobber an existing key file.
    if os.path.exists(args.keyfile):
        raise esptool.FatalError("ERROR: Key file %s already exists" % args.keyfile)
    signing_key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    with open(args.keyfile, "wb") as key_file:
        key_file.write(signing_key.to_pem())
    print("ECDSA NIST256p private key in PEM format written to %s" % args.keyfile)
def summary(self, CorpNum, JobID, TradeType, TradeUsage, UserID=None):
    """Query the summary of a collection (scraping) job result.

    args
        CorpNum : Popbill member business registration number
        JobID : job identifier (must be exactly 18 characters)
        TradeType : document-type list, N - normal cash receipt,
            C - cancelled cash receipt
        TradeUsage : usage list, P - personal deduction,
            C - expense evidence
        UserID : Popbill member user ID
    return
        summary of the collection result
    raise
        PopbillException
    """
    # ``is None`` instead of ``== None``: identity check is the correct
    # idiom and avoids invoking a custom __eq__.
    if JobID is None or len(JobID) != 18:
        raise PopbillException(-99999999, "작업아이디(jobID)가 올바르지 않습니다.")
    uri = '/HomeTax/Cashbill/' + JobID + '/Summary'
    uri += '?TradeType=' + ','.join(TradeType)
    uri += '&TradeUsage=' + ','.join(TradeUsage)
    return self._httpget(uri, CorpNum, UserID)
def get_pytorch_link(ft) -> str:
    "Returns link to pytorch docs of `ft`."
    name = ft.__name__
    suffix = '.html'
    # Fixed anchors for the two special attribute pages.
    if name == 'device':
        return f'{PYTORCH_DOCS}tensor_attributes{suffix}#torch-device'
    if name == 'Tensor':
        return f'{PYTORCH_DOCS}tensors{suffix}#torch-tensor'
    if name.startswith('torchvision'):
        page = get_module_name(ft).replace('.', '/')
        if inspect.ismodule(ft):
            name = name.replace('.', '-')
        return f'{PYTORCH_DOCS}{page}{suffix}#{name}'
    if name.startswith('torch.nn') and inspect.ismodule(ft):
        # nn.functional is a special case
        anchor = name.replace('.', '-')
        return f'{PYTORCH_DOCS}nn{suffix}#{anchor}'
    parts = get_module_name(ft).split('.')
    if len(parts) == 1:
        return f'{PYTORCH_DOCS}{parts[0]}{suffix}#{parts[0]}.{name}'
    # 'utils' is a pytorch special case: skip one extra path component.
    skip = 1 if parts[1] == 'utils' else 0
    page = parts[1 + skip]
    if inspect.ismodule(ft):
        return f'{PYTORCH_DOCS}{page}{suffix}#module-{name}'
    anchor = '.'.join(parts[:(2 + skip)] + [name])
    return f'{PYTORCH_DOCS}{page}{suffix}#{anchor}'
def get_configuration_dict ( self , secret_attrs = False ) :
    """Type-specific configuration for backward compatibility"""
    # Collect each shard's own configuration, forwarding the secrecy flag.
    shard_configs = [
        shard . get_configuration_dict ( secret_attrs = secret_attrs )
        for shard in self . _shards
    ]
    return {
        'repo_nexml2json' : self . repo_nexml2json ,
        'number_of_shards' : len ( self . _shards ) ,
        'initialization' : self . _filepath_args ,
        'shards' : shard_configs ,
    }
def calculateHurst ( self , series , exponent = None ) :
    '''Estimate the Hurst exponent of ``series`` via rescaled-range (R/S)
    analysis, plot the log-log regression, and return the fitted slope.

    : type series : List
    : type exponent : int
    : rtype : float'''
    if exponent is None :
        exponent = self . bestExponent ( len ( series ) )
    # For each dyadic level, accumulate the rescaled range over all segments.
    sizes = [ ]
    rs_totals = [ ]
    for level in range ( 0 , exponent ) :
        parts = int ( math . pow ( 2 , level ) )
        size = int ( len ( series ) / parts )
        sizes . append ( size )
        rs_totals . append ( 0 )
        for part in range ( 0 , parts ) :
            start = int ( size * part )
            limit = int ( size * ( part + 1 ) )
            cum_dev = self . sumDeviation ( self . deviation ( series , start , limit , self . mean ( series , start , limit ) ) )
            spread = float ( max ( cum_dev ) - min ( cum_dev ) )
            sigma = self . standartDeviation ( series , start , limit )
            # Skip degenerate segments to avoid division by zero.
            if spread != 0 and sigma != 0 :
                rs_totals [ level ] += ( spread / sigma )
    # Average the rescaled range over the 2**level segments at each level.
    rs_means = [ total / int ( math . pow ( 2 , level ) ) for level , total in enumerate ( rs_totals ) ]
    # log calculation (base-10 for the regression)
    rs_log = [ math . log ( m , 10 ) for m in rs_means ]
    size_log = [ math . log ( s , 10 ) for s in sizes ]
    slope , intercept = np . polyfit ( size_log , rs_log , 1 )
    fit_line = [ slope * v + intercept for v in size_log ]
    plt . plot ( size_log , rs_log , '--' )
    plt . plot ( size_log , fit_line , 'b' )
    plt . title ( slope )
    # graphic dimension settings: square the axes over the joint data range
    limitUp = max ( max ( size_log ) , max ( rs_log ) )
    limitDown = min ( min ( size_log ) , min ( rs_log ) )
    plt . gca ( ) . set_xlim ( limitDown , limitUp )
    plt . gca ( ) . set_ylim ( limitDown , limitUp )
    print ( "Hurst exponent: " + str ( slope ) )
    plt . show ( )
    return slope
def check_python_classifiers ( package_info , * args ) :
    """Does the package have Python classifiers ?
    : param package _ info : package _ info dictionary
    : return : Tuple ( is the condition True or False ? , reason if it is False else None , score to be applied )"""
    # Fall back to an empty list: .get() alone yields None when the key is
    # missing (or explicitly None), which would crash the iteration below.
    classifiers = package_info . get ( 'classifiers' ) or [ ]
    reason = "Python classifiers missing"
    # any() short-circuits instead of materialising the whole filtered list.
    result = any ( c . startswith ( 'Programming Language :: Python ::' ) for c in classifiers )
    return result , reason , HAS_PYTHON_CLASSIFIERS
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.