| signature (string, length 29–44.1k) | implementation (string, length 0–85.2k) |
|---|---|
def line_width(line):
    """Return the display width of *line* when printed to a terminal.

    (计算本行在输出到命令行后所占的宽度.)
    """
    if six.PY2:
        # On Python 2 only unicode text has well-defined character widths.
        assert isinstance(line, unicode)
    preprocessed = width_cal_preprocess(line)
    return sum(get_char_width(ch) for ch in preprocessed)
def _get_app_config(self, app_name):
    """Return the app config whose ``name`` (not label) equals *app_name*.

    Returns ``None`` when no installed app matches.
    """
    for app_config in apps.get_app_configs():
        if app_config.name == app_name:
            return app_config
    return None
def _splitlines_preserving_trailing_newline ( str ) :
'''Returns a list of the lines in the string , breaking at line boundaries and
preserving a trailing newline ( if present ) .
Essentially , this works like ` ` str . striplines ( False ) ` ` but preserves an
empty line at the end . This is equivalent to the following code :
. . code - block : : python
lines = str . splitlines ( )
if str . endswith ( ' \n ' ) or str . endswith ( ' \r ' ) :
lines . append ( ' ' )''' | lines = str . splitlines ( )
if str . endswith ( '\n' ) or str . endswith ( '\r' ) :
lines . append ( '' )
return lines |
def get_config_input_with_inactive(self, **kwargs):
    """Auto Generated Code"""
    # The initial "config" element is created and immediately replaced,
    # mirroring the generated-code template.
    root = ET.Element("config")
    get_config = ET.Element("get_config")
    root = get_config
    input_el = ET.SubElement(get_config, "input")
    ET.SubElement(
        input_el, "with-inactive",
        xmlns="http://tail-f.com/ns/netconf/inactive/1.0")
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def get_or_create_candidate(self, row, party, race):
    """Get or create the Candidate object for the given row of AP data.

    Synthesizes the proper AP candidate id so records can be tied to live
    data, and calls ``get_or_create_person`` for the Person object passed
    to Django.
    """
    person = self.get_or_create_person(row)
    parts = row["id"].split("-")
    candidate_id = "{0}-{1}".format(parts[1], parts[2])
    # The "None of these candidates" ballot option additionally keeps the
    # leading id component to stay unique.
    if person.last_name == "None of these candidates":
        candidate_id = "{0}-{1}".format(parts[0], candidate_id)
    defaults = {"party": party, "incumbent": row.get("incumbent")}
    candidate, _created = election.Candidate.objects.update_or_create(
        person=person,
        race=race,
        ap_candidate_id=candidate_id,
        defaults=defaults,
    )
    return candidate
async def communicate(self, data_id=None, run_sync=False, save_settings=True):
    """Scan the database for resolving Data objects and process them.

    This is submitted as a task to the manager's channel workers.

    :param data_id: Optional id of a Data object which (+ its children)
        should be processed.  If it is not given, all resolving objects
        are processed.
    :param run_sync: If ``True``, wait until all processes spawned from
        this point on have finished processing.  If no processes are
        spawned, this results in a deadlock, since counts are handled on
        process finish.
    :param save_settings: If ``True``, save the current Django settings
        context to the global state.  This should never be ``True`` for
        "automatic" calls, such as from Django signals, which can be
        invoked from inappropriate contexts (such as in the listener).
        For user code, it should be left at the default value.  The saved
        settings are in effect until the next such call.
    """
    # Dotted path of the executor; falls back to the local executor.
    executor = getattr(settings, 'FLOW_EXECUTOR', {}).get('NAME', 'resolwe.flow.executors.local')
    logger.debug(__("Manager sending communicate command on '{}' triggered by Data with id {}.", state.MANAGER_CONTROL_CHANNEL, data_id,))
    saved_settings = self.state.settings_override
    if save_settings:
        # Marshal the current Django settings and persist them globally.
        saved_settings = self._marshal_settings()
        self.state.settings_override = saved_settings
    if run_sync:
        # Register this dispatch so the barrier below knows when the
        # spawned work has drained.
        self._ensure_counter()
        await self.sync_counter.inc('communicate')
    try:
        await consumer.send_event({WorkerProtocol.COMMAND: WorkerProtocol.COMMUNICATE, WorkerProtocol.COMMUNICATE_SETTINGS: saved_settings, WorkerProtocol.COMMUNICATE_EXTRA: {'data_id': data_id, 'executor': executor,},})
    except ChannelFull:
        logger.exception("ChannelFull error occurred while sending communicate message.")
        # The command never went out, so undo the counter increment.
        await self.sync_counter.dec('communicate')
    if run_sync and not self.sync_counter.active:
        logger.debug(__("Manager on channel '{}' entering synchronization block.", state.MANAGER_CONTROL_CHANNEL))
        # Block until all processes spawned from this point have finished.
        await self.execution_barrier()
        logger.debug(__("Manager on channel '{}' exiting synchronization block.", state.MANAGER_CONTROL_CHANNEL))
def readlink(path):
    '''Equivalent to os.readlink().

    On Python 3, or on non-Windows platforms, this defers to ``os.readlink``.
    On Windows/Python 2 it parses the NTFS reparse point data directly.
    '''
    if six.PY3 or not salt.utils.platform.is_windows():
        return os.readlink(path)
    if not HAS_WIN32FILE:
        # NOTE(review): execution falls through after logging -- there is no
        # return/raise here even though required modules are missing;
        # confirm this is intended.
        log.error('Cannot read %s, missing required modules', path)
    reparse_data = _get_reparse_data(path)
    if not reparse_data:
        # Reproduce *NIX behavior when os.readlink is performed on a path
        # that is not a symbolic link.
        raise OSError(errno.EINVAL, 'Invalid argument: \'{0}\''.format(path))
    # REPARSE_DATA_BUFFER structure - see
    # http://msdn.microsoft.com/en-us/library/ff552012.aspx
    # parse the structure header to work out which type of reparse point this is
    header_parser = struct.Struct('L')
    ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
    if not ReparseTag & 0xA000FFFF == 0xA000000C:
        raise OSError(errno.EINVAL, '{0} is not a symlink, but another type of reparse point ' '(0x{0:X}).'.format(ReparseTag))
    # parse as a symlink reparse point structure (the structure for other
    # reparse points is different)
    data_parser = struct.Struct('LHHHHHHL')
    ReparseTag, ReparseDataLength, Reserved, SubstituteNameOffset, SubstituteNameLength, PrintNameOffset, PrintNameLength, Flags = data_parser.unpack(reparse_data[:data_parser.size])
    # The substitute name lives in a buffer right after the fixed-size
    # header; its offset/length fields are relative to that buffer.
    path_buffer_offset = data_parser.size
    absolute_substitute_name_offset = path_buffer_offset + SubstituteNameOffset
    target_bytes = reparse_data[absolute_substitute_name_offset:absolute_substitute_name_offset + SubstituteNameLength]
    target = target_bytes.decode('UTF-16')
    if target.startswith('\\??\\'):
        # strip the NT object-namespace prefix
        target = target[4:]
    try:
        # comes out in 8.3 form; convert it to LFN to make it look nicer
        target = win32file.GetLongPathName(target)
    except pywinerror as exc:
        # If target is on a UNC share, the decoded target will be in the format
        # "UNC\hostanme\sharename\additional\subdirs\under\share". So, in
        # these cases, return the target path in the proper UNC path format.
        if target.startswith('UNC\\'):
            return re.sub(r'^UNC\\+', r'\\\\', target)
        # if file is not found (i.e. bad symlink), return it anyway like on *nix
        if exc.winerror == 2:
            return target
        raise
    return target
def retryable_http_error(e):
    """Return True if *e* is an HTTP download error likely to be transient.

    Retryable cases:
      * ``urllib.error.HTTPError`` with status 503 (Service Unavailable),
        408 (Request Timeout) or 500 (Internal Server Error);
      * ``BadStatusLine`` -- the server didn't return a valid response at all.
    """
    # BUG fix: HTTPError.code is an int; the original compared it against
    # the strings ('503', '408', '500'), which never matched and silently
    # disabled retries for those statuses.
    if isinstance(e, urllib.error.HTTPError) and e.code in (503, 408, 500):
        return True
    if isinstance(e, BadStatusLine):
        return True
    return False
def getOverlayColor(self, ulOverlayHandle):
    """Gets the color tint of the overlay quad.

    Returns a ``(result, red, green, blue)`` tuple where the color
    components are filled in by the native call.
    """
    fn = self.function_table.getOverlayColor
    red, green, blue = c_float(), c_float(), c_float()
    result = fn(ulOverlayHandle, byref(red), byref(green), byref(blue))
    return result, red.value, green.value, blue.value
def atualizar_software_sat(self):
    """Override of :meth:`~satcfe.base.FuncoesSAT.atualizar_software_sat`
    (update the SAT software), wrapping the raw return value in a
    standard response object.

    :return: A standard SAT response.
    :rtype: satcfe.resposta.padrao.RespostaSAT
    """
    retorno = super(ClienteSATLocal, self).atualizar_software_sat()
    return RespostaSAT.atualizar_software_sat(retorno)
def predict(self, X, break_ties="random", return_probs=False, **kwargs):
    """Predict int labels for an input X on all tasks.

    Args:
        X: The input for the predict_proba method
        break_ties: A tie-breaking policy
        return_probs: Return the predicted probabilities as well

    Returns:
        Y_p: A t-length list of n-dim np.ndarrays of predictions in [1, K_t]
        [Optionally: Y_s: A t-length list of [n, K_t] np.ndarrays of
        predicted probabilities]
    """
    Y_s = self.predict_proba(X, **kwargs)
    self._check(Y_s, typ=list)
    self._check(Y_s[0], typ=np.ndarray)
    Y_p = []
    for Y_ts in Y_s:
        Y_tp = self._break_ties(Y_ts, break_ties)
        # BUG fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int (platform default integer) is the exact equivalent.
        Y_p.append(Y_tp.astype(int))
    if return_probs:
        return Y_p, Y_s
    return Y_p
def load_xml(self, xmlfile):
    """Load the model definition from XML.

    Parameters
    ----------
    xmlfile : str
        Name of the input XML file.
    """
    self.logger.info('Loading XML')
    for component in self.components:
        component.load_xml(xmlfile)
    # Refresh every source known to the likelihood object, then drop the
    # stale fit cache.
    for source_name in self.like.sourceNames():
        self.update_source(source_name)
    self._fitcache = None
    self.logger.info('Finished Loading XML')
def dispatch(self, request, *args, **kwargs):
    # type: (HttpRequest, object, object) -> HttpResponse
    """Inspect the HTTP method and delegate to the view method.

    This is the default implementation of the
    :py:class:`django.views.View` method, which will inspect the
    HTTP method in the input request and delegate it to the
    corresponding method in the view.  The only allowed method on
    this view is ``post``.

    :param request: The input request sent to the view
    :type request: django.http.HttpRequest
    :return: The response from the view
    :rtype: django.http.HttpResponse
    :raises: :py:class:`django.http.HttpResponseNotAllowed` if the
        method is invoked for other than HTTP POST request.
        :py:class:`django.http.HttpResponseBadRequest` if the
        request verification fails.
        :py:class:`django.http.HttpResponseServerError` for any
        internal exception.
    """
    # NOTE(review): *args/**kwargs are not forwarded to the parent
    # dispatch -- confirm no caller relies on extra view arguments.
    return super(SkillAdapter, self).dispatch(request)
def decrypt(self, data, oaep_hash_fn_name=None, mgf1_hash_fn_name=None):
    """Decrypt data that was encrypted with the PKCS1 OAEP protocol.

    :param data: data to decrypt
    :param oaep_hash_fn_name: hash function name to use with OAEP
        (defaults to the class-level ``__default_oaep_hash_function_name__``)
    :param mgf1_hash_fn_name: hash function name to use with MGF1 padding
        (defaults to the class-level ``__default_mgf1_hash_function_name__``)
    :return: bytes
    :raises ValueError: if no private key has been set on this instance
    """
    if self.__private_key is None:
        raise ValueError('Unable to call this method. Private key must be set')
    if oaep_hash_fn_name is None:
        oaep_hash_fn_name = self.__class__.__default_oaep_hash_function_name__
    if mgf1_hash_fn_name is None:
        mgf1_hash_fn_name = self.__class__.__default_mgf1_hash_function_name__
    # Resolve hash algorithm classes by name from the ``hashes`` module.
    oaep_hash_cls = getattr(hashes, oaep_hash_fn_name)
    mgf1_hash_cls = getattr(hashes, mgf1_hash_fn_name)
    return self.__private_key.decrypt(data, padding.OAEP(mgf=padding.MGF1(algorithm=mgf1_hash_cls()), algorithm=oaep_hash_cls(), label=None))
def strptime(string, timezone=0):
    """Parse a 'YYYY-MM-DD HH:MM' timestamp, tolerating the 24:00
    end-of-day label (which datetime.strptime rejects), and shift by
    *timezone* hours to UTC."""
    date_part = datetime.datetime(
        int(string[0:4]), int(string[5:7]), int(string[8:10]))
    # Adding hours/minutes as a timedelta lets hour 24 roll over to the
    # next day instead of raising.
    clock = datetime.timedelta(
        hours=int(string[-5:-3]), minutes=int(string[-2:]))
    return date_part + clock - datetime.timedelta(hours=timezone)
def file_type(filename, param='rb'):
    """Return the compression type of a file: 'gz', 'bz2', 'zip', the file
    extension (in write mode), or 'no match'."""
    magic_dict = {
        b"\x1f\x8b\x08": "gz",
        b"\x42\x5a\x68": "bz2",
        b"\x50\x4b\x03\x04": "zip",
    }
    if param.startswith('w'):
        # Write mode: the file may not exist yet, so trust the extension.
        return filename.split('.')[-1]
    probe_len = max(len(sig) for sig in magic_dict)
    with open(filename, 'rb') as handle:
        head = handle.read(probe_len)
    for signature, kind in magic_dict.items():
        if head.startswith(signature):
            return kind
    return "no match"
def contains_circle(self, pt, radius):
    """Return True when the circle centered at *pt* with *radius* lies
    strictly inside this rect."""
    fits_horizontally = self.l < pt.x - radius and self.r > pt.x + radius
    fits_vertically = self.t < pt.y - radius and self.b > pt.y + radius
    return fits_horizontally and fits_vertically
def create_product(self, name, location='GLO', unit='kg', **kwargs):
    """Create a new product in the model database.

    Returns the matching exchange when the product was added, or ``False``
    when a product with the same code already exists.
    """
    candidate = item_factory(
        name=name, location=location, unit=unit, type='product', **kwargs)
    # Guard clause: a duplicate code means nothing to do.
    if self.exists_in_database(candidate['code']):
        return False
    self.add_to_database(candidate)
    return self.get_exchange(name)
def setMinGap(self, typeID, minGap):
    """setMinGap(string, double) -> None

    Sets the offset (gap to front vehicle if halting) of vehicles of
    this type.
    """
    send_double = self._connection._sendDoubleCmd
    send_double(tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_MINGAP, typeID, minGap)
def create_new(projectname):
    """Create a new Bast project named *projectname* in the current directory.

    Clones the Bast skeleton repository, strips its git history, seeds a
    ``.env`` from the skeleton's ``.env.example`` and generates an
    application key.
    """
    git_url = "https://github.com/moluwole/Bast_skeleton"
    path = os.path.abspath('.') + "/" + projectname
    if not os.path.exists(path):
        os.makedirs(path)
    click.echo(Fore.GREEN + ' ___ ___ __________')
    click.echo(Fore.GREEN + ' / _ )/ _ | / __/_ __/')
    click.echo(Fore.GREEN + ' / _ / __ |_\ \ / /')
    click.echo(Fore.GREEN + '/____/_/ |_/___/ /_/')
    click.echo(Fore.GREEN + "Creating Project at %s.... " % path)
    click.echo(Fore.GREEN + "Pulling Project Skeleton from Repo")
    try:
        Repo.clone_from(git_url, path)
        click.echo(Fore.GREEN + "Setting up project")
        shutil.rmtree(path + "/.git")
        env_file = path + "/.env"
        # BUG fix: the original checked os.path.exists('/.env') (the
        # filesystem root) and fell back to copying the relative
        # '.env.example', so the project's .env was never reliably
        # created.  Seed it from the skeleton inside the project dir.
        if not os.path.isfile(env_file):
            shutil.copy(path + '/.env.example', env_file)
        call(['panther', 'generate:key', path])
        click.echo(Fore.GREEN + "New Bast Project created at %s " % path)
    except Exception as e:
        click.echo(Fore.RED + "An error occurred creating a new project. Try Again.\n Reason: {}".format(e))
def prepend_zeros_to_lists(ls):
    """Left-pad every sub-list of *ls* in place with "0" strings until all
    sub-lists share the length of the longest one.

    Used for sign-extending binary numbers.  Mutates *ls*; returns None.
    """
    # max() raises ValueError on an empty sequence; default=0 turns an
    # empty outer list into a harmless no-op instead of a crash.
    longest = max((len(sub) for sub in ls), default=0)
    for sub in ls:
        while len(sub) < longest:
            sub.insert(0, "0")
def file_is_attached(self, url):
    '''Return True if at least one book has a file with the given url
    as attachment.'''
    query = self._get_search_field('_attachments.url', url)
    hits = self.es.count(index=self.index_name, body=query)['count']
    return hits > 0
def routing_monitoring(self):
    """Return the route table for the engine, including gateway, networks
    and type of route (dynamic, static).

    Calling this can take a few seconds to retrieve routes from the engine.

    Find all routes for engine resource::

        >>> engine = Engine('sg_vm')
        >>> for route in engine.routing_monitoring:
        ...     route
        Route(route_network=u'0.0.0.0', route_netmask=0, route_gateway=u'10.0.0.1', route_type=u'Static', dst_if=1, src_if=-1)

    :raises EngineCommandFailed: routes cannot be retrieved
    :return: list of route elements
    :rtype: SerializedIterable(Route)
    """
    try:
        result = self.make_request(EngineCommandFailed, resource='routing_monitoring')
        return Route(result)
    except SMCConnectionError:
        # Surface a connection timeout as a command failure so callers
        # only have to handle EngineCommandFailed.
        raise EngineCommandFailed('Timed out waiting for routes')
def summarizeReads(file_handle, file_type):
    """Summarize a fasta or fastq file: number of reads, average read
    length, total number of bases, longest/shortest/median read and the
    count of each individual base (A, T, G, C, N)."""
    base_counts = defaultdict(int)
    lengths = []
    for record in SeqIO.parse(file_handle, file_type):
        lengths.append(len(record))
        for base in record:
            base_counts[base] += 1
    read_number = len(lengths)
    total_length = sum(lengths)
    return {
        "read_number": read_number,
        "total_length": total_length,
        # Guard every aggregate against an empty file.
        "average_length": total_length / read_number if read_number > 0 else 0,
        "max_length": max(lengths) if lengths else 0,
        "min_length": min(lengths) if lengths else 0,
        "median_length": median(lengths) if lengths else 0,
        "base_counts": base_counts,
    }
async def ListModels(self, tag):
    '''tag : str
    Returns -> typing.Sequence[~UserModel]
    '''
    # map input types to rpc msg
    msg = dict(
        type='ModelManager',
        request='ListModels',
        version=5,
        params={'tag': tag},
    )
    return await self.rpc(msg)
def get_widget(self, request):
    """Return the form widget for this reverse relation.

    The table view cannot derive a form field from a reverse relation, so
    this builds the same ModelChoiceField a direct relation would use.
    Because "RestrictedSelectWidget" is used, the rendered widget is either
    a text input or a select box depending on how many objects the
    queryset holds.
    """
    queryset = self.field.related_model._default_manager.all()
    field = forms.ModelChoiceField(
        widget=RestrictedSelectWidget, queryset=queryset)
    return self._update_widget_choices(field.widget)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  We also support searching
    up two directory levels for an appropriately named parent directory.
    """
    tried = []
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s"
              % (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
def RetryUpload(self, job, job_id, error):
    """Retry the BigQuery upload job.

    Using the same job id protects us from duplicating data on the server.
    If we fail all of our retries we raise.

    Args:
      job: BigQuery job object
      job_id: ID string for this upload job
      error: errors.HttpError object from the first error

    Returns:
      API response object on success, None on failure

    Raises:
      BigQueryJobUploadError: if we can't get the bigquery job started after
        retry_max_attempts
    """
    if self.IsErrorRetryable(error):
        retry_count = 0
        sleep_interval = config.CONFIG["BigQuery.retry_interval"]
        while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]:
            # Exponential backoff: sleep_interval grows by retry_multiplier
            # on each retryable failure below.
            time.sleep(sleep_interval.seconds)
            logging.info("Retrying job_id: %s", job_id)
            retry_count += 1
            try:
                response = job.execute()
                return response
            except errors.HttpError as e:
                if self.IsErrorRetryable(e):
                    sleep_interval *= config.CONFIG["BigQuery.retry_multiplier"]
                    logging.exception("Error with job: %s, will retry in %s", job_id, sleep_interval)
                else:
                    # A non-retryable status aborts immediately.
                    raise BigQueryJobUploadError("Can't retry error code %s. Giving up" " on job: %s." % (e.resp.status, job_id))
    else:
        raise BigQueryJobUploadError("Can't retry error code %s. Giving up on " "job: %s." % (error.resp.status, job_id))
    # All retry attempts exhausted without success.
    raise BigQueryJobUploadError("Giving up on job:%s after %s retries." % (job_id, retry_count))
def select_directory(self):
    """Prompt the user for a directory and change into it."""
    # Pause stdio redirection while the blocking dialog is open.
    self.redirect_stdio.emit(False)
    chosen = getexistingdirectory(
        self.main, _("Select directory"), getcwd_or_home())
    if chosen:
        self.chdir(chosen)
    self.redirect_stdio.emit(True)
def check_overlap(self, other, wavelengths=None, threshold=0.01):
    """Check for wavelength overlap between two spectra.

    Only wavelengths where ``self`` throughput is non-zero are considered.

    Example of full overlap::

        |---------- other ----------|
            |------ self ------|

    Examples of partial overlap::

        |---------- self ----------|
            |------ other ------|

        |---- other ----|
            |---- self ----|

        |---- self ----|
            |---- other ----|

    Examples of no overlap::

        |---- self ----|  |---- other ----|

        |---- other ----|  |---- self ----|

    Parameters
    ----------
    other : `BaseSpectrum`

    wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
        Wavelength values for integration.
        If not a Quantity, assumed to be in Angstrom.
        If `None`, `waveset` is used.

    threshold : float
        If less than this fraction of flux or throughput falls outside
        wavelength overlap, the *lack* of overlap is *insignificant*.
        This is only used when partial overlap is detected.
        Default is 1%.

    Returns
    -------
    result : {'full', 'partial_most', 'partial_notmost', 'none'}
        * 'full' - ``self`` coverage is within or same as ``other``
        * 'partial_most' - Less than ``threshold`` fraction of ``self``
          flux is outside the overlapping wavelength region, i.e., the
          *lack* of overlap is *insignificant*
        * 'partial_notmost' - ``self`` partially overlaps with ``other``
          but does not qualify for 'partial_most'
        * 'none' - ``self`` does not overlap ``other``

    Raises
    ------
    synphot.exceptions.SynphotError
        Invalid inputs.
    """
    if not isinstance(other, BaseSpectrum):
        raise exceptions.SynphotError('other must be spectrum or bandpass.')
    # Special cases where no sampling wavelengths given and
    # one of the inputs is continuous.
    if wavelengths is None:
        if other.waveset is None:
            return 'full'
        if self.waveset is None:
            return 'partial_notmost'
    # Compare only the wavelengths where self's throughput is non-zero.
    x1 = self._validate_wavelengths(wavelengths)
    y1 = self(x1)
    a = x1[y1 > 0].value
    b = other._validate_wavelengths(wavelengths).value
    result = utils.overlap_status(a, b)
    if result == 'partial':
        # If there is no need to extrapolate or taper other
        # (i.e., other is zero at self's wave limits),
        # then we consider it as a full coverage.
        # This logic assumes __call__ never returns mag or count!
        if ((isinstance(other.model, Empirical1D) and other.model.is_tapered() or not isinstance(other.model, (Empirical1D, _CompoundModel))) and np.allclose(other(x1[::x1.size - 1]).value, 0)):
            result = 'full'
        # Check if the lack of overlap is significant.
        else:
            # Get all the flux
            totalflux = self.integrate(wavelengths=wavelengths).value
            utils.validate_totalflux(totalflux)
            a_min, a_max = a.min(), a.max()
            b_min, b_max = b.min(), b.max()
            # Now get the other two pieces: flux lying outside other's
            # wavelength range on either side.
            excluded = 0.0
            if a_min < b_min:
                excluded += self.integrate(wavelengths=np.array([a_min, b_min])).value
            if a_max > b_max:
                excluded += self.integrate(wavelengths=np.array([b_max, a_max])).value
            if excluded / totalflux < threshold:
                result = 'partial_most'
            else:
                result = 'partial_notmost'
    return result
def open_files(subseqs):
    """Return the generated cython "open file" statements for *subseqs*."""
    # progress output of the code generator
    print(' . open_files')
    lines = Lines()
    lines.add(1, 'cpdef open_files(self, int idx):')
    for seq in subseqs:
        # Only sequences flagged for disk storage get a file handle.
        lines.add(2, 'if self._%s_diskflag:' % seq.name)
        lines.add(3, 'self._%s_file = fopen(str(self._%s_path).encode(), ' '"rb+")' % (2 * (seq.name,)))
        if seq.NDIM == 0:
            # 0-dimensional sequence: one 8-byte double per time step.
            lines.add(3, 'fseek(self._%s_file, idx*8, SEEK_SET)' % seq.name)
        else:
            # Multi-dimensional: _length entries of 8 bytes per time step.
            lines.add(3, 'fseek(self._%s_file, idx*self._%s_length*8, ' 'SEEK_SET)' % (2 * (seq.name,)))
    return lines
def open(filename, frame='unspecified'):
    """Create a PointCloud from data saved in a file.

    Note: this shadows the builtin ``open``; it is intended to be called
    module-qualified.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load data from.
    frame : :obj:`str`
        The frame to apply to the created PointCloud.

    Returns
    -------
    :obj:`PointCloud`
        A PointCloud created from the data in the file.
    """
    data = BagOfPoints.load_data(filename)
    return PointCloud(data, frame)
def func(f, xmin, xmax, step=None):
    """Create sample points from function *f*, a single-parameter function
    returning a number (e.g. math.sin).

    *xmin* and *xmax* give the first and last X values and *step* the
    sampling interval; when *step* is omitted (or 0), the range is split
    into 100 samples.  Returns a list of (x, f(x)) pairs.
    """
    if not step:
        step = (xmax - xmin) / 100.0
    samples = []
    x = xmin
    while x < xmax:
        samples.append((x, f(x)))
        x += step
    return samples
def preferred_ordinal(cls, attr_name):
    """Return an ordering value for a particular attribute key.

    Unrecognized attributes and OIDs sort lexically after all known ones.

    :return:
        An orderable (ordinal, name) tuple.
    """
    attr_name = cls.map(attr_name)
    try:
        ordinal = cls.preferred_order.index(attr_name)
    except ValueError:
        # Not in the preferred list: sort after every recognized name.
        ordinal = len(cls.preferred_order)
    return (ordinal, attr_name)
def _read_page(file_obj, page_header, column_metadata):
    """Read the data page from the given file-object and convert it to raw,
    uncompressed bytes (if necessary).

    :param file_obj: file-like object positioned at the start of the page
    :param page_header: page header carrying compressed/uncompressed sizes
    :param column_metadata: column chunk metadata carrying the codec
    :return: the uncompressed page bytes
    :raises ParquetFormatException: for any codec other than UNCOMPRESSED,
        SNAPPY or GZIP
    """
    bytes_from_file = file_obj.read(page_header.compressed_page_size)
    codec = column_metadata.codec
    if codec is not None and codec != parquet_thrift.CompressionCodec.UNCOMPRESSED:
        if column_metadata.codec == parquet_thrift.CompressionCodec.SNAPPY:
            raw_bytes = snappy.decompress(bytes_from_file)
        elif column_metadata.codec == parquet_thrift.CompressionCodec.GZIP:
            io_obj = io.BytesIO(bytes_from_file)
            with gzip.GzipFile(fileobj=io_obj, mode='rb') as file_data:
                raw_bytes = file_data.read()
        else:
            raise ParquetFormatException("Unsupported Codec: {0}".format(codec))
    else:
        raw_bytes = bytes_from_file
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug("Read page with compression type %s. Bytes %d -> %d", _get_name(parquet_thrift.CompressionCodec, codec), page_header.compressed_page_size, page_header.uncompressed_page_size)
    # Sanity check: decompression must yield exactly the advertised size.
    assert len(raw_bytes) == page_header.uncompressed_page_size, "found {0} raw bytes (expected {1})".format(len(raw_bytes), page_header.uncompressed_page_size)
    return raw_bytes
def _rudimentary_get_command ( self , args ) :
"""Rudimentary parsing to get the command""" | nouns = [ ]
command_names = self . commands_loader . command_table . keys ( )
for arg in args :
if arg and arg [ 0 ] != '-' :
nouns . append ( arg )
else :
break
def _find_args ( args ) :
search = ' ' . join ( args ) . lower ( )
return next ( ( x for x in command_names if x . startswith ( search ) ) , False )
# since the command name may be immediately followed by a positional arg , strip those off
while nouns and not _find_args ( nouns ) :
del nouns [ - 1 ]
# ensure the command string is case - insensitive
for i in range ( len ( nouns ) ) :
args [ i ] = args [ i ] . lower ( )
return ' ' . join ( nouns ) |
def get_editor_widget(self, request, plugins, plugin):
    """Return the Django form widget used for the text area."""
    cancel_url = reverse(
        'admin:%s' % self.get_admin_url_name('delete_on_cancel'))
    render_plugin_url = reverse(
        'admin:%s' % self.get_admin_url_name('render_plugin'))
    action_token = self.get_action_token(request, plugin)
    # Delete the text plugin when the user cancels, but only when it was
    # requested and the plugin has no saved instance yet.
    delete_text_on_cancel = (
        'delete-on-cancel' in request.GET and  # noqa
        not plugin.get_plugin_instance()[0]
    )
    return TextEditorWidget(
        installed_plugins=plugins,
        pk=plugin.pk,
        placeholder=plugin.placeholder,
        plugin_language=plugin.language,
        configuration=self.ckeditor_configuration,
        render_plugin_url=render_plugin_url,
        cancel_url=cancel_url,
        action_token=action_token,
        delete_on_cancel=delete_text_on_cancel,
    )
def projection_matrix(w):
    '''Return the projection matrix of a direction w (I - w w^T).'''
    direction = np.reshape(w, 3)
    # np.outer(d, d) equals dot(d.reshape(3,1), d.reshape(1,3)).
    return np.identity(3) - np.outer(direction, direction)
def on_message(self, client, userdata, msg):
    """Callback when the MQTT client received a new message.

    Dispatches on the topic: intents, hotword toggle/detection, ASR
    listening/capture events, unrecognized intents, Snipsfile updates and
    dialogue session lifecycle events.

    :param client: the MQTT client.
    :param userdata: unused.
    :param msg: the MQTT message.
    """
    if msg is None:
        return
    self.log_info("New message on topic {}".format(msg.topic))
    self.log_debug("Payload {}".format(msg.payload))
    if msg.payload is None or len(msg.payload) == 0:
        # NOTE(review): empty payloads just 'pass' and still reach the topic
        # dispatch below -- confirm this fall-through is intended.
        pass
    if msg.payload:
        # Pre-extract the session/site identifiers shared by the dialogue
        # handlers further down.
        payload = json.loads(msg.payload.decode('utf-8'))
        site_id = payload.get('siteId')
        session_id = payload.get('sessionId')
    if msg.topic is not None and msg.topic.startswith(MQTT_TOPIC_INTENT) and msg.payload:
        # A parsed intent arrived.
        payload = json.loads(msg.payload.decode('utf-8'))
        intent = IntentParser.parse(payload, self.registry.intent_classes)
        self.log_debug("Parsed intent: {}".format(intent))
        if self.handle_intent is not None:
            if intent is not None:
                self.log_debug("New intent: {}".format(str(intent.intentName)))
                self.handle_intent(intent, payload)
    elif msg.topic is not None and msg.topic == MQTT_TOPIC_HOTWORD + "toggleOn":
        self.state_handler.set_state(State.hotword_toggle_on)
    elif MQTT_TOPIC_HOTWORD_DETECTED_RE.match(msg.topic):
        # The first detection also turns the feedback sound off.
        if not self.first_hotword_detected:
            self.client.publish("hermes/feedback/sound/toggleOff", payload=None, qos=0, retain=False)
            self.first_hotword_detected = True
        self.state_handler.set_state(State.hotword_detected)
        if self.handle_start_listening is not None:
            self.handle_start_listening()
    elif msg.topic == MQTT_TOPIC_ASR + "startListening":
        self.state_handler.set_state(State.asr_start_listening)
    elif msg.topic == MQTT_TOPIC_ASR + "textCaptured":
        self.state_handler.set_state(State.asr_text_captured)
        if msg.payload is not None:
            self.log_debug("Text captured: {}".format(str(msg.payload)))
            if self.handle_done_listening is not None:
                self.handle_done_listening()
            payload = json.loads(msg.payload.decode('utf-8'))
            # An empty transcription is treated as "no intent".
            if payload['text'] == '':
                self.handle_intent(None, None)
    elif msg.topic is not None and msg.topic == "hermes/nlu/intentNotRecognized":
        self.handle_intent(None, None)
    elif msg.topic == "snipsmanager/setSnipsfile" and msg.payload:
        self.state_handler.set_state(State.asr_text_captured)
    elif msg.topic == MQTT_TOPIC_SESSION_STARTED:
        self.state_handler.set_state(State.session_started)
        if self.handlers_dialogue_events is not None:
            self.handlers_dialogue_events(self.DIALOGUE_EVENT_STARTED, session_id, site_id)
    elif msg.topic == MQTT_TOPIC_SESSION_ENDED:
        self.state_handler.set_state(State.session_ended)
        if self.handlers_dialogue_events is not None:
            self.handlers_dialogue_events(self.DIALOGUE_EVENT_ENDED, session_id, site_id)
    elif msg.topic == MQTT_TOPIC_SESSION_QUEUED:
        self.state_handler.set_state(State.session_queued)
        if self.handlers_dialogue_events is not None:
            self.handlers_dialogue_events(self.DIALOGUE_EVENT_QUEUED, session_id, site_id)
def to_example(dictionary):
    """Helper: build a ``tf.train.Example`` from a (string -> int/float/str/bytes
    list) dictionary.

    Raises ValueError for empty fields or unrecognized element types.
    """
    feature_map = {}
    for key, values in six.iteritems(dictionary):
        if not values:
            raise ValueError("Empty generated field: %s" % str((key, values)))
        first = values[0]
        if isinstance(first, six.integer_types):
            proto = tf.train.Feature(int64_list=tf.train.Int64List(value=values))
        elif isinstance(first, float):
            proto = tf.train.Feature(float_list=tf.train.FloatList(value=values))
        elif isinstance(first, six.string_types):
            if not six.PY2:
                # Python 3 str must be encoded to bytes for BytesList.
                values = [bytes(x, "utf-8") for x in values]
            proto = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
        elif isinstance(first, bytes):
            proto = tf.train.Feature(bytes_list=tf.train.BytesList(value=values))
        else:
            raise ValueError("Value for %s is not a recognized type; v: %s type: %s"
                             % (key, str(first), str(type(first))))
        feature_map[key] = proto
    return tf.train.Example(features=tf.train.Features(feature=feature_map))
def run_continuous(self, scale):
    """Return a continuous solution to the RGE as `RGsolution` instance."""
    if scale == self.scale_in:
        raise ValueError("The scale must be different from the input scale")
    # Order the interval endpoints regardless of whether we run up or down.
    scale_min, scale_max = sorted((scale, self.scale_in))
    fun = rge.smeft_evolve_continuous(C_in=self.C_in,
                                      scale_in=self.scale_in,
                                      scale_out=scale)
    return wilson.classes.RGsolution(fun, scale_min, scale_max)
def Spitzglass_low(SG, Tavg, L=None, D=None, P1=None, P2=None, Q=None,
                   Ts=288.7, Ps=101325., Zavg=1, E=1.):
    r'''Calculation function for dealing with flow of a compressible gas in a
    pipeline with the Spitzglass (low pressure drop) formula. Can calculate
    any of the following, given all other inputs:

    * Flow rate
    * Upstream pressure
    * Downstream pressure
    * Diameter of pipe (numerical solution)
    * Length of pipe

    A variety of different constants and expressions have been presented
    for the Spitzglass (low pressure drop) formula. Here, the form as in [1]_
    is used but with a more precise metric conversion from inches to m.

    .. math::
        Q = 125.1060 E \left(\frac{T_s}{P_s}\right) \left[\frac{2(P_1
        - P_2)(P_s + 1210)}{L \cdot SG \cdot T_{avg} Z_{avg}(1 + 0.09144/D
        + \frac{150}{127}D)}\right]^{0.5} D^{2.5}

    Parameters
    ----------
    SG : float
        Specific gravity of fluid with respect to air at the reference
        temperature and pressure `Ts` and `Ps`, [-]
    Tavg : float
        Average temperature of the fluid in the pipeline, [K]
    L : float, optional
        Length of pipe, [m]
    D : float, optional
        Diameter of pipe, [m]
    P1 : float, optional
        Inlet pressure to pipe, [Pa]
    P2 : float, optional
        Outlet pressure from pipe, [Pa]
    Q : float, optional
        Flow rate of gas through pipe, [m^3/s]
    Ts : float, optional
        Reference temperature for the specific gravity of the gas, [K]
    Ps : float, optional
        Reference pressure for the specific gravity of the gas, [Pa]
    Zavg : float, optional
        Average compressibility factor for gas, [-]
    E : float, optional
        Pipeline efficiency, a correction factor between 0 and 1

    Returns
    -------
    Q, P1, P2, D, or L : float
        The missing input which was solved for [base SI]

    Raises
    ------
    ValueError
        If an unsupported combination of inputs is provided.

    Notes
    -----
    This equation is often presented without any correction for reference
    conditions for specific gravity. The 1210 constant in [1]_ is an
    approximation necessary for the reference correction to function without
    a square of the pressure difference.

    Examples
    --------
    >>> Spitzglass_low(D=0.154051, P1=6720.3199, P2=0, L=54.864, SG=0.6, Tavg=288.7)
    0.9488775242530617

    References
    ----------
    .. [1] Coelho, Paulo M., and Carlos Pinho. "Considerations about Equations
       for Steady State Flow in Natural Gas Pipelines." Journal of the
       Brazilian Society of Mechanical Sciences and Engineering 29, no. 3
       (September 2007): 262-73. doi:10.1590/S1678-58782007000300005.
    .. [2] GPSA. GPSA Engineering Data Book. 13th edition. Gas Processors
       Suppliers Association, Tulsa, OK, 2012.
    '''
    c3 = 1.181102362204724409448818897637795275591  # 150/127, i.e. 0.03/inch in 1/m
    c4 = 0.09144  # 3.6 inch expressed in m
    c5 = 125.1060  # leading constant of the metric form
    if Q is None and (None not in [L, D, P1, P2]):
        return c5*Ts/Ps*D**2.5*E*(((P1 - P2)*2*(Ps + 1210.))
                                  / (L*SG*Tavg*Zavg*(1 + c4/D + c3*D)))**0.5
    elif D is None and (None not in [L, Q, P1, P2]):
        # No closed form in D; solve the direct formula numerically.
        to_solve = lambda D: Q - Spitzglass_low(SG=SG, Tavg=Tavg, L=L, D=D,
                                                P1=P1, P2=P2, Ts=Ts, Ps=Ps,
                                                Zavg=Zavg, E=E)
        return newton(to_solve, 0.5)
    elif P1 is None and (None not in [L, Q, D, P2]):
        # Algebraic inversion of the flow formula for P1.
        return 0.5*(2.0*D**6*E**2*P2*Ts**2*c5**2*(Ps + 1210.0)
                    + D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3
                    + D*L*Ps**2*Q**2*SG*Tavg*Zavg
                    + L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2*(Ps + 1210.0))
    elif P2 is None and (None not in [L, Q, D, P1]):
        # Algebraic inversion of the flow formula for P2.
        return 0.5*(2.0*D**6*E**2*P1*Ts**2*c5**2*(Ps + 1210.0)
                    - D**2*L*Ps**2*Q**2*SG*Tavg*Zavg*c3
                    - D*L*Ps**2*Q**2*SG*Tavg*Zavg
                    - L*Ps**2*Q**2*SG*Tavg*Zavg*c4)/(D**6*E**2*Ts**2*c5**2*(Ps + 1210.0))
    elif L is None and (None not in [P2, Q, D, P1]):
        # Algebraic inversion of the flow formula for L.
        return 2.0*D**6*E**2*Ts**2*c5**2*(P1*Ps + 1210.0*P1 - P2*Ps
                                          - 1210.0*P2)/(Ps**2*Q**2*SG*Tavg*Zavg*(D**2*c3 + D + c4))
    else:
        # Raise ValueError (not bare Exception) so callers can catch a
        # specific, idiomatic exception type for bad argument combinations.
        raise ValueError('This function solves for either flow, upstream '
                         'pressure, downstream pressure, diameter, or length; '
                         'all other inputs must be provided.')
def _get_login_page(self):
    """Fetch the home page and return the login form's action URL.

    :raises PyHydroQuebecError: on network failure or when the login form
        or its action URL cannot be located.
    """
    try:
        response = yield from self._session.get(HOME_URL, timeout=self._timeout)
    except OSError:
        raise PyHydroQuebecError("Can not connect to login page")
    page = yield from response.text()
    # The login form is named 'fm'; its action attribute is the login URL.
    soup = BeautifulSoup(page, 'html.parser')
    form = soup.find('form', {'name': 'fm'})
    if form is None:
        raise PyHydroQuebecError("No login form find")
    action = form.attrs.get('action')
    if action is None:
        raise PyHydroQuebecError("Can not found login url")
    return action
def _Backward1_T_Ph ( P , h ) :
"""Backward equation for region 1 , T = f ( P , h )
Parameters
P : float
Pressure , [ MPa ]
h : float
Specific enthalpy , [ kJ / kg ]
Returns
T : float
Temperature , [ K ]
References
IAPWS , Revised Release on the IAPWS Industrial Formulation 1997 for the
Thermodynamic Properties of Water and Steam August 2007,
http : / / www . iapws . org / relguide / IF97 - Rev . html , Eq 11
Examples
> > > _ Backward1 _ T _ Ph ( 3,500)
391.798509
> > > _ Backward1 _ T _ Ph ( 80,1500)
611.041229""" | I = [ 0 , 0 , 0 , 0 , 0 , 0 , 1 , 1 , 1 , 1 , 1 , 1 , 1 , 2 , 2 , 3 , 3 , 4 , 5 , 6 ]
J = [ 0 , 1 , 2 , 6 , 22 , 32 , 0 , 1 , 2 , 3 , 4 , 10 , 32 , 10 , 32 , 10 , 32 , 32 , 32 , 32 ]
n = [ - 0.23872489924521e3 , 0.40421188637945e3 , 0.11349746881718e3 , - 0.58457616048039e1 , - 0.15285482413140e-3 , - 0.10866707695377e-5 , - 0.13391744872602e2 , 0.43211039183559e2 , - 0.54010067170506e2 , 0.30535892203916e2 , - 0.65964749423638e1 , 0.93965400878363e-2 , 0.11573647505340e-6 , - 0.25858641282073e-4 , - 0.40644363084799e-8 , 0.66456186191635e-7 , 0.80670734103027e-10 , - 0.93477771213947e-12 , 0.58265442020601e-14 , - 0.15020185953503e-16 ]
Pr = P / 1
nu = h / 2500
T = 0
for i , j , ni in zip ( I , J , n ) :
T += ni * Pr ** i * ( nu + 1 ) ** j
return T |
def is_namespace_preordered(self, namespace_id_hash):
    """Given a namespace preorder hash, determine if it is preordered
    at the current block.

    :param namespace_id_hash: the preorder hash to look up
    :return: True when a preorder record exists, False otherwise
    """
    # The if/else returning True/False was redundant; a None-check
    # expresses the same result directly.
    return self.get_namespace_preorder(namespace_id_hash) is not None
def get_scale_fac(fig, fiducial_width=8, fiducial_height=7):
    """Gets a factor to scale fonts by for the given figure.

    The scale factor is the square root of the figure's area relative to a
    fiducial figure of dimensions (`fiducial_width`, `fiducial_height`).
    """
    width, height = fig.get_size_inches()
    area_ratio = (width * height) / (fiducial_width * fiducial_height)
    return area_ratio ** 0.5
def as_dict(self):
    """Return the constructor arguments as a dictionary (effectively the
    kwargs to the constructor).

    :return: dict of constructor arguments
    :rtype: dict
    """
    # Each public key maps to the private attribute of the same name
    # prefixed with an underscore.
    keys = ('pip_version', 'pip_url', 'pip_requirement',
            'pkg_resources_version', 'pkg_resources_url',
            'git_tag', 'git_commit', 'git_remotes', 'git_is_dirty')
    return {key: getattr(self, '_' + key) for key in keys}
def _construct_grover_circuit(self) -> None:
    """Constructs an instance of Grover's Algorithm from the initialized
    oracle unitary and qubits.

    :return: None
    """
    oracle_name = "GROVER_ORACLE"
    # Define the oracle as a custom gate over this instance's qubits.
    oracle_prog = Program()
    oracle_prog.defgate(oracle_name, self.unitary_function_mapping)
    oracle_prog.inst(tuple([oracle_name] + self.qubits))
    self.grover_circuit = self.oracle_grover(oracle_prog, self.qubits)
def create_or_update_detail(self, request):
    """Create or update an object completely given an id.

    Maps to ``PUT /api/object/:id`` in REST semantics.

    :param request: rip.Request
    :return: rip.Response
    """
    detail_pipeline = crud_pipeline_factory.create_or_update_detail_pipeline(
        configuration=self.configuration)
    return detail_pipeline(request=request)
def cli_forms(self, *args):
    """List all available form definitions"""
    with_form = []
    without_form = []
    for name, schema in schemastore.items():
        # A schema counts as having a form only when 'form' exists and
        # is non-empty.
        if 'form' in schema and len(schema['form']) > 0:
            with_form.append(name)
        else:
            without_form.append(name)
    self.log('Schemata with form:', with_form)
    self.log('Missing forms:', without_form)
def to_naf(self):
    """Convert the element from KAF to NAF by renaming each property's
    'pid' attribute to 'id'. A no-op when the element is not KAF."""
    if self.type != 'KAF':
        return
    for prop in self.node.findall('properties/property'):
        prop.set('id', prop.get('pid'))
        del prop.attrib['pid']
def potential_radiation(dates, lon, lat, timezone, terrain_slope=0,
                        terrain_slope_azimuth=0, cloud_fraction=0, split=False):
    """Calculate potential shortwave radiation for a specific location and time.

    Global radiation is computed as described in:

    Liston, G. E. and Elder, K. (2006): A Meteorological Distribution System
    for High-Resolution Terrestrial Modeling (MicroMet),
    J. Hydrometeorol., 7, 217-234.

    Corrections for eccentricity are carried out following:

    Paltridge, G. W., Platt, C. M. R., 1976. Radiative processes in
    Meteorology and Climatology. Elsevier Scientific Publishing Company,
    Amsterdam, Oxford, New York.

    Parameters
    ----------
    dates : DatetimeIndex or array-like
        The dates for which potential radiation shall be calculated
    lon : float
        Longitude (degrees)
    lat : float
        Latitude (degrees)
    timezone : float
        Time zone
    terrain_slope : float, default 0
        Terrain slope as defined in Liston & Elder (2006) (eq. 12)
    terrain_slope_azimuth : float, default 0
        Terrain slope azimuth as defined in Liston & Elder (2006) (eq. 13)
    cloud_fraction : float, default 0
        Cloud fraction between 0 and 1
    split : boolean, default False
        If True, return a DataFrame containing direct and diffuse radiation,
        otherwise return a Series containing total radiation
    """
    SOLAR_CONSTANT = 1367.
    DAYS_PER_YEAR = 365.25
    TROPIC_OF_CANCER = np.deg2rad(23.43697)
    SOLSTICE = 173.0

    dates = pd.DatetimeIndex(dates)
    hours = np.array(dates.hour)
    minutes = np.array(dates.minute)
    doy = np.array(dates.dayofyear)

    # Solar declination (rad).
    decl = TROPIC_OF_CANCER * np.cos(2.0 * np.pi * (doy - SOLSTICE) / DAYS_PER_YEAR)

    # Hour angle (rad), shifted by the offset from the standard meridian.
    standard_meridian = timezone * 15.
    delta_lat_time = (lon - standard_meridian) * 24. / 360.
    hour_angle = np.pi * (((hours + minutes / 60. + delta_lat_time) / 12.) - 1.)

    # Solar zenith angle; negative cosines (sun below horizon) clipped to 0.
    cos_zen = (np.sin(decl) * np.sin(np.deg2rad(lat))
               + np.cos(decl) * np.cos(np.deg2rad(lat)) * np.cos(hour_angle))
    cos_zen = cos_zen.clip(min=0)
    zen = np.arccos(cos_zen)

    # Transmissivities for direct and diffuse radiation from cloud fraction.
    tau_dir = (0.6 + 0.2 * cos_zen) * (1.0 - cloud_fraction)
    tau_diff = (0.3 + 0.1 * cos_zen) * cloud_fraction

    # Eccentricity correction of the solar constant (Paltridge & Platt, 1976).
    beta = 2. * np.pi * (doy / DAYS_PER_YEAR)
    radius_ratio = (1.00011 + 0.034221 * np.cos(beta) + 0.00128 * np.sin(beta)
                    + 0.000719 * np.cos(2. * beta) + 0.000077 * np.sin(2 * beta))
    s0 = SOLAR_CONSTANT * radius_ratio

    # Incidence angle on the (possibly sloped) terrain.
    mu = np.arcsin(np.cos(decl) * np.sin(hour_angle) / np.sin(zen))
    cosi = (np.cos(terrain_slope) * cos_zen
            + np.sin(terrain_slope) * np.sin(zen) * np.cos(mu - terrain_slope_azimuth))

    direct = (s0 * tau_dir * cosi).clip(min=0)
    diffuse = s0 * tau_diff * cos_zen

    df = pd.DataFrame(index=dates, data=dict(direct=direct, diffuse=diffuse))
    return df if split else df.direct + df.diffuse
def _correct_for_light_travel_time(observer, target):
    """Return a light-time corrected astrometric position and velocity.

    Given an `observer` that is a `Barycentric` position somewhere in
    the solar system, compute where in the sky they will see the body
    `target`, by computing the light-time between them and figuring out
    where `target` was back when the light was leaving it that is now
    reaching the eyes or instruments of the `observer`.

    Returns a 4-tuple ``(position, velocity, t, light_time)`` where position
    and velocity are relative to the observer.
    """
    t = observer.t
    ts = t.ts
    # Observer position (au) and velocity (au/day).
    cposition = observer.position.au
    cvelocity = observer.velocity.au_per_d
    # Initial target state at the uncorrected time t.
    tposition, tvelocity, gcrs_position, message = target._at(t)
    distance = length_of(tposition - cposition)
    light_time0 = 0.0
    t_tdb = t.tdb
    # Fixed-point iteration: re-evaluate the target at (t - light_time)
    # until the light-travel time converges to 1e-12 days, max 10 rounds.
    for i in range(10):
        light_time = distance / C_AUDAY
        # delta is array-valued; require every element within tolerance.
        delta = light_time - light_time0
        if -1e-12 < min(delta) and max(delta) < 1e-12:
            break
        # Re-sample the target at the earlier, light-corrected epoch.
        t2 = ts.tdb(jd=t_tdb - light_time)
        tposition, tvelocity, gcrs_position, message = target._at(t2)
        distance = length_of(tposition - cposition)
        light_time0 = light_time
    else:
        # for/else: only reached when the loop never breaks (no convergence).
        raise ValueError('light-travel time failed to converge')
    return tposition - cposition, tvelocity - cvelocity, t, light_time
def keys_at(self, depth, counter=1):
    """Iterate keys at specified depth.

    Depth < 1 yields the ROOT sentinel; otherwise recurse through child
    trees until `counter` reaches `depth`.
    """
    if depth < 1:
        yield ROOT
    elif counter == depth:
        for key in self.keys():
            yield key
    else:
        for subtree in self.values():
            for key in subtree.keys_at(depth, counter + 1):
                yield key
def set(self, value):
    """Sets the value of the object

    :param value:
        A byte string
    """
    if not isinstance(value, byte_cls):
        raise TypeError(unwrap(
            '''
            %s value must be a byte string, not %s
            ''',
            type_name(self),
            type_name(value)
        ))
    self._bytes = value
    self.contents = value
    # Invalidate the cached header so it is recomputed from the new value.
    self._header = None
    if self._indefinite:
        self._indefinite = False
        self.method = 0
    if self._trailer != b'':
        self._trailer = b''
def check_frequencies(feed: "Feed", *, as_df: bool = False, include_warnings: bool = False) -> List:
    """Analog of :func:`check_agency` for ``feed.frequencies``.

    Validates required/invalid columns, ``trip_id`` references,
    start/end time format and ordering, non-overlapping headway periods,
    and ``headway_secs`` / ``exact_times`` values.

    :param feed: the Feed whose ``frequencies`` table is checked
    :param as_df: if True, return the problems as a DataFrame
    :param include_warnings: if True, also report warning-level problems
    :return: the collected problems (list, or DataFrame when ``as_df``);
        empty when the table is absent
    """
    table = "frequencies"
    problems = []
    # Preliminary checks
    if feed.frequencies is None:
        return problems
    f = feed.frequencies.copy()
    problems = check_for_required_columns(problems, table, f)
    if problems:
        # Missing required columns make further checks meaningless.
        return format_problems(problems, as_df=as_df)
    if include_warnings:
        problems = check_for_invalid_columns(problems, table, f)
    # Check trip_id
    problems = check_column_linked_id(problems, table, f, "trip_id", feed.trips)
    # Check start_time and end_time
    time_cols = ["start_time", "end_time"]
    for col in time_cols:
        problems = check_column(problems, table, f, col, valid_time)
    # Convert to seconds so times can be compared numerically below.
    for col in time_cols:
        f[col] = f[col].map(hp.timestr_to_seconds)
    # start_time should be earlier than end_time
    cond = f["start_time"] >= f["end_time"]
    problems = check_table(problems, table, f, cond, "start_time not earlier than end_time")
    # Headway periods should not overlap: after sorting by start_time,
    # a period overlaps its predecessor when it starts before that one ends.
    f = f.sort_values(["trip_id", "start_time"])
    for __, group in f.groupby("trip_id"):
        a = group["start_time"].values
        b = group["end_time"].values
        # NOTE(review): these indices are positions within the sorted
        # per-trip group, not original row labels — confirm downstream use.
        indices = np.flatnonzero(a[1:] < b[:-1]).tolist()
        if indices:
            problems.append(["error", "Headway periods for the same trip overlap", table, indices, ])
    # Check headway_secs
    v = lambda x: x >= 0
    problems = check_column(problems, table, f, "headway_secs", v)
    # Check exact_times
    v = lambda x: x in range(2)
    problems = check_column(problems, table, f, "exact_times", v, column_required=False)
    return format_problems(problems, as_df=as_df)
def prepack(self, namedstruct, skip_self=False, skip_sub=False):
    '''Run prepack'''
    header = self.header
    # Prepack the optional header first, when sub-prepacking is enabled.
    if not skip_sub and header is not None and hasattr(header, 'fullprepack'):
        header.fullprepack(namedstruct._seqs[0])
    Parser.prepack(self, namedstruct, skip_self, skip_sub)
def _create_parsing_plan(self, desired_type: Type[T], filesystem_object: PersistedObject,
                         logger: Logger, log_only_last: bool = False) -> ParsingPlan[T]:
    """Creates a parsing plan to parse the given filesystem object into the
    given desired_type.

    This overrides the method in AnyParser, in order to provide a
    'cascading' parsing plan.

    :param desired_type:
    :param filesystem_object:
    :param logger:
    :param log_only_last: a flag to only log the last part of the file path
        (default False)
    :return:
    """
    # Log the plan construction, then delegate to the cascading plan.
    plan_msg = '(B) ' + get_parsing_plan_log_str(filesystem_object, desired_type,
                                                 log_only_last=log_only_last,
                                                 parser=self)
    logger.debug(plan_msg)
    return CascadingParser.CascadingParsingPlan(desired_type, filesystem_object,
                                                self, self._parsers_list,
                                                logger=logger)
def pull_byte(self, stack_pointer):
    """Pull one byte off the given stack and advance the stack pointer."""
    address = stack_pointer.value
    value = self.memory.read_byte(address)
    # FIXME: self.system_stack_pointer += 1
    stack_pointer.increment(1)
    return value
def export(*pools, **kwargs):
    '''
    .. versionadded:: 2015.5.0

    Export storage pools

    pools : string
        One or more storage pools to export
    force : boolean
        Force export of storage pools

    CLI Example:

    .. code-block:: bash

        salt '*' zpool.export myzpool ... [force=True|False]
        salt '*' zpool.export myzpool2 myzpool2 ... [force=True|False]
    '''
    ## Configure pool
    # -f forces the export when requested via kwargs.
    flags = ['-f'] if kwargs.get('force', False) else []
    targets = list(pools)

    ## Export pools
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zpool_command'](
            command='export',
            flags=flags,
            target=targets,
        ),
        python_shell=False,
    )
    return __utils__['zfs.parse_command_result'](res, 'exported')
def check_type_of_param_list_elements(param_list):
    """Ensures that all elements of param_list are ndarrays or None.

    The first element must be a numpy array; every other element must be
    a numpy array or None.

    Parameters
    ----------
    param_list : list
        The parameters to validate.

    Returns
    -------
    None

    Raises
    ------
    TypeError
        If any element fails validation.
    """
    # Explicit checks instead of `assert`, which is silently stripped when
    # Python runs with -O. (The docstring previously claimed ValueError,
    # but the code has always raised TypeError; callers depend on that.)
    # An empty param_list still raises IndexError here, as before.
    first_ok = isinstance(param_list[0], np.ndarray)
    rest_ok = all(x is None or isinstance(x, np.ndarray) for x in param_list)
    if not (first_ok and rest_ok):
        msg = "param_list[0] must be a numpy array."
        msg_2 = "All other elements must be numpy arrays or None."
        raise TypeError(msg + "\n" + msg_2)
    return None
def media_type_str(mediatype):
    """Convert internal API media type to string."""
    # Pairs are checked in the same order as the original if-chain.
    labels = (
        (const.MEDIA_TYPE_UNKNOWN, 'Unknown'),
        (const.MEDIA_TYPE_VIDEO, 'Video'),
        (const.MEDIA_TYPE_MUSIC, 'Music'),
        (const.MEDIA_TYPE_TV, 'TV'),
    )
    for known, label in labels:
        if mediatype == known:
            return label
    return 'Unsupported'
def __create_price_for(self, commodity: Commodity, price: PriceModel):
    """Creates a new Price entry in the book, for the given commodity"""
    logging.info("Adding a new price for %s, %s, %s", commodity.mnemonic,
                 price.datetime.strftime("%Y-%m-%d"), price.value)
    # Safety check: the new quote must use the commodity's existing currency.
    book_currency = SecurityAggregate(self.book, commodity).get_currency()
    if book_currency != price.currency:
        raise ValueError("Requested currency does not match the currency previously used",
                         book_currency, price.currency)
    # Source field values are described at:
    # https://www.gnucash.org/docs/v2.6/C/gnucash-help/tool-price.html
    commodity.prices.append(
        Price(commodity, book_currency, price.datetime.date(), price.value,
              source="Finance::Quote"))
def subscribe_notice(self, access_token):
    """doc: http://open.youku.com/docs/doc?id=31"""
    response = requests.get(
        'https://openapi.youku.com/v2/users/subscribe/notice.json',
        params={'client_id': self.client_id, 'access_token': access_token})
    check_error(response)
    return response.json()
def make_article_info(self):
    """Build the ArticleInfo segment of the document.

    The Article Info contains the (self) Citation, Editors, Dates,
    Copyright, Funding Statement, Competing Interests Statement,
    Correspondence, and Footnotes. This content follows the Heading and
    precedes the Main segment in the output.
    """
    body = self.main.getroot().find('body')
    # A dedicated div exposes ArticleInfo to linking and formatting.
    info_div = etree.Element('div', {'id': 'ArticleInfo'})
    body.insert(1, info_div)
    # Self citation.
    info_div.append(self.make_article_info_citation())
    # Editors.
    editors = self.article.root.xpath(
        "./front/article-meta/contrib-group/contrib[@contrib-type='editor']")
    self.make_article_info_editors(editors, info_div)
    # Important dates.
    info_div.append(self.make_article_info_dates())
    # Copyright, funding, competing interests, correspondences, footnotes.
    self.make_article_info_copyright(info_div)
    self.make_article_info_funding(info_div)
    self.make_article_info_competing_interests(info_div)
    self.make_article_info_correspondences(info_div)
    self.make_article_info_footnotes_other(info_div)
def call_copy_numbers(seg_file, work_dir, data):
    """Call copy numbers from a normalized and segmented input file."""
    out_file = os.path.join(work_dir, "%s-call.seg" % dd.get_sample_name(data))
    # Skip work when the output already exists.
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_file) as tx_out_file:
        gatk_params = ["-T", "CallCopyRatioSegments", "-I", seg_file, "-O", tx_out_file]
        _run_with_memory_scaling(gatk_params, tx_out_file, data)
    return out_file
def storeIDToWebID(key, storeid):
    """Takes a key (int) and storeid (int) and produces a webid (a 16-character
    str suitable for including in URLs)"""
    masked = key ^ storeid
    digits = list('%0.16x' % (masked,))
    # Shuffle nybble positions according to the key-derived swap schedule.
    for nybbleid in range(8):
        a, b = _swapat(key, nybbleid)
        _swap(digits, a, b)
    return ''.join(digits)
async def remove(request: web.Request) -> web.Response:
    """Remove a public key from authorized_keys

    DELETE /server/ssh_keys/:key_md5_hexdigest
    -> 200 OK if the key was found
    -> 404 Not Found otherwise
    """
    requested_hash = request.match_info['key_md5']
    # Rebuild the key list without the requested key, noting whether we saw it.
    kept: List[str] = []
    found = False
    for keyhash, key in get_keys():
        if keyhash == requested_hash:
            found = True
        else:
            kept.append(key)
    if not found:
        return web.json_response(
            data={'error': 'invalid-key-hash',
                  'message': f'No such key md5 {requested_hash}'},
            status=404)
    with authorized_keys('w') as ak:
        ak.write('\n'.join(kept) + '\n')
    return web.json_response(
        data={'message': f'Key {requested_hash} deleted. '
                         'Restart robot to take effect',
              'restart_url': '/server/restart'},
        status=200)
def merge_unique_identities(db, from_uuid, to_uuid):
    """Merge one unique identity into another.

    Use this function to join 'from_uuid' unique identity into
    'to_uuid'. Identities and enrollments related to 'from_uuid' will be
    assigned to 'to_uuid'. In addition, 'from_uuid' will be removed
    from the registry. Duplicated enrollments will be also removed from
    the registry while overlapped enrollments will be merged.

    This function also merges two profiles. When a field on 'to_uuid'
    profile is None or empty, it will be updated with the value on the
    profile of 'from_uuid'. If any of the two unique identities was set
    as a bot, the new profile will also be set as a bot.

    When 'from_uuid' and 'to_uuid' are equal, the action does not have any
    effect.

    The function raises a 'NotFoundError' exception when either 'from_uuid'
    or 'to_uuid' do not exist in the registry.

    :param db: database manager used to open the working session
    :param from_uuid: identifier of the unique identity set to merge
    :param to_uuid: identifier of the unique identity where 'from_uuid'
        will be merged
    :raises NotFoundError: raised when either 'from_uuid' or 'to_uuid'
        do not exist in the registry
    """
    with db.connect() as session:
        fuid = find_unique_identity(session, from_uuid)
        tuid = find_unique_identity(session, to_uuid)
        if not fuid:
            raise NotFoundError(entity=from_uuid)
        # 'from_uuid' was validated above, so merging onto itself is a no-op.
        if from_uuid == to_uuid:
            return
        if not tuid:
            raise NotFoundError(entity=to_uuid)
        # Update profile information.
        # NOTE: only runs when BOTH identities carry a profile.
        if tuid.profile and fuid.profile:
            # Update data giving priority to 'to_uuid'.
            # When 'is_bot' is set to True in any of the unique identities
            # it will remain the same.
            profile_data = {}
            if not tuid.profile.name:
                profile_data['name'] = fuid.profile.name
            if not tuid.profile.email:
                profile_data['email'] = fuid.profile.email
            if not tuid.profile.country_code:
                profile_data['country_code'] = fuid.profile.country_code
            if not tuid.profile.gender:
                # Gender accuracy travels together with the gender value.
                profile_data['gender'] = fuid.profile.gender
                profile_data['gender_acc'] = fuid.profile.gender_acc
            if fuid.profile.is_bot:
                profile_data['is_bot'] = True
            edit_profile_db(session, tuid, **profile_data)
        # Update identities
        for identity in fuid.identities:
            move_identity_db(session, identity, tuid)
        # Move those enrollments that to_uid does not have.
        # It is needed to copy the list in-place to avoid
        # sync problems when enrollments are moved.
        for rol in fuid.enrollments[:]:
            # An identical enrollment on 'to_uuid' means this one is a
            # duplicate and is left behind (deleted with 'fuid' below).
            enrollment = session.query(Enrollment).filter(
                Enrollment.uidentity == tuid,
                Enrollment.organization == rol.organization,
                Enrollment.start == rol.start,
                Enrollment.end == rol.end).first()
            if not enrollment:
                move_enrollment_db(session, rol, tuid)
        # For some reason, uuid are not updated until changes are
        # committed (flush does nothing). Force to commit changes
        # to avoid deletion of identities when removing 'fuid'
        session.commit()
        delete_unique_identity_db(session, fuid)
        # Retrieve of organizations to merge the enrollments,
        # before closing the session
        query = session.query(Organization.name).join(Enrollment).filter(
            Enrollment.uidentity == tuid).distinct()
        orgs = [org.name for org in query]
    # Merge enrollments
    for org in orgs:
        merge_enrollments(db, to_uuid, org)
def copyFile(input, output, replace=None):
    """Copy a file whole from input to output.

    The copy happens only when the destination does not yet exist, unless
    ``replace`` is truthy, in which case an existing destination is
    overwritten.

    Parameters
    ----------
    input : str
        Source path. (The name shadows the builtin ``input``; kept for
        API compatibility.)
    output : str
        Destination path.
    replace : bool, optional
        When truthy, overwrite an existing destination.
    """
    # Simplified from `not _found or (_found and replace)`, which is
    # logically equivalent to `not _found or replace`.
    if not findFile(output) or replace:
        shutil.copy2(input, output)
def ensure_utf8(str_or_unicode):
    """tests, if the input is ``str`` or ``unicode``. if it is ``unicode``,
    it will be encoded from ``unicode`` to ``utf-8``. otherwise, the
    input string is returned."""
    # Guard-clause style: handle each accepted type, then fail loudly.
    if isinstance(str_or_unicode, str):
        return str_or_unicode
    if isinstance(str_or_unicode, unicode):
        return str_or_unicode.encode('utf-8')
    raise ValueError("Input '{0}' should be a string or unicode, but it is of "
                     "type {1}".format(str_or_unicode, type(str_or_unicode)))
def list_get(self, num_iids, fields=None, session=None):
    '''Batch-fetch item information (``taobao.items.list.get``).

    Viewing non-public attributes requires a signed-in user session.

    :param num_iids: item numeric ids to fetch
    :param fields: item fields to return; defaults to all ``Item`` fields
    :param session: user session, required for non-public attributes
    :return: the populated ``self.items``
    '''
    # The previous default `fields=[]` was a mutable default argument;
    # `None` is behaviorally identical here because the falsy check below
    # already treated an empty list as "use all fields".
    request = TOPRequest('taobao.items.list.get')
    request['num_iids'] = num_iids
    if not fields:
        fields = Item().fields
    request['fields'] = fields
    self.create(self.execute(request, session))
    return self.items
def _initialize_expectations(self, config=None, data_asset_name=None):
    """Instantiate `_expectations_config` as empty by default or with a specified expectation `config`.

    In addition, this always sets the `default_expectation_args` to:
        `include_config`: False,
        `catch_exceptions`: False,
        `result_format`: 'BASIC'

    By default, initializes data_asset_type to the name of the implementing
    class, but subclasses that have interoperable semantics (e.g. Dataset)
    may override that parameter to clarify their interoperability.

    Args:
        config (json): A json-serializable expectation config. If None,
            creates a default `_expectations_config` with an empty list of
            expectations and key value `data_asset_name` as `data_asset_name`.
        data_asset_name (string): The name to assign to
            `_expectations_config.data_asset_name` if `config` is not provided.
    """
    # PEP 8 fix: identity test `is not None` instead of `!= None`.
    if config is not None:
        # !!! Should validate the incoming config with jsonschema here
        # Copy the original so that we don't overwrite it by accident.
        # Pandas incorrectly interprets this as an attempt to create a column
        # and throws up a warning. Suppress it since we are subclassing.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UserWarning)
            self._expectations_config = DotDict(copy.deepcopy(config))
        if data_asset_name is not None:
            self._expectations_config["data_asset_name"] = data_asset_name
    else:
        # Pandas incorrectly interprets this as an attempt to create a column
        # and throws up a warning. Suppress it since we are subclassing.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UserWarning)
            self._expectations_config = DotDict({
                "data_asset_name": data_asset_name,
                "data_asset_type": self.__class__.__name__,
                "meta": {"great_expectations.__version__": __version__},
                "expectations": [],
            })
    # Pandas incorrectly interprets this as an attempt to create a column
    # and throws up a warning. Suppress it since we are subclassing.
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=UserWarning)
        self.default_expectation_args = {
            "include_config": False,
            "catch_exceptions": False,
            "result_format": 'BASIC',
        }
def filterAll(self, **kwargs):
    """filterAll (aka filterAllAnd) - filter ALL nodes in this collection
    and all of their children.  Results must match ALL the filter criteria;
    for ANY, use the *Or methods.

    For just the nodes in this collection, use "filter" or "filterAnd" on a
    TagCollection.  For special filter keys,
    @see #AdvancedHTMLParser.AdvancedHTMLParser.filter

    Requires the QueryableList module to be installed (i.e.
    AdvancedHTMLParser was installed without the '--no-deps' flag).  For an
    alternative without QueryableList, consider the
    #AdvancedHTMLParser.AdvancedHTMLParser.find method or the getElement*
    methods.

    @return TagCollection<AdvancedTag>
    """
    if canFilterTags is False:
        raise NotImplementedError('filter methods requires QueryableList installed, it is not. Either install QueryableList, or try the less-robust "find" method, or the getElement* methods.')
    return FilterableTagCollection(self.getAllNodes()).filterAnd(**kwargs)
def fold(self, elems):
    """Perform constant folding on a conditional.

    If the condition is a compile-time ``Constant``, collapse the whole
    expression to whichever branch the condition selects.  Otherwise,
    return an ``Instructions`` sequence implementing the conditional with
    jumps.

    :param elems: a list (or list-like object) containing the elements
        (condition, true branch, false branch).
    :returns: A list of one element, containing the instructions necessary
        to implement the operator.
    """
    condition, true_branch, false_branch = elems
    if isinstance(condition, Constant):
        chosen = true_branch if condition.value else false_branch
        return [chosen]
    sequence = [
        condition,
        JumpIfNot(len(true_branch) + 2),
        pop,
        true_branch,
        Jump(len(false_branch) + 1),
        pop,
        false_branch,
    ]
    return [Instructions(sequence)]
def adaptive_gaussian_prior_builder(getter, name, *args, **kwargs):
    """A pre-canned builder for adaptive scalar gaussian prior distributions.

    Given a true `getter` function and arguments forwarded from
    `tf.get_variable`, return a distribution object for a scalar-valued
    adaptive gaussian prior which will be broadcast over a variable of the
    requisite shape. This prior's parameters (e.g `loc` and `scale` for a
    gaussian) will consist of a single learned scalar for the entire
    `tf.Variable` for which it serves as the prior, regardless of that
    `tf.Variable`'s shape.

    Args:
      getter: The `getter` passed to a `custom_getter`. Please see the
        documentation for `tf.get_variable`.
      name: The `name` argument passed to `tf.get_variable`.
      *args: See positional arguments passed to `tf.get_variable`.
      **kwargs: See keyword arguments passed to `tf.get_variable`.

    Returns:
      An instance of `tfp.distributions.Normal` representing the prior
      distribution over the variable in question.
    """
    # Both prior parameters are scalars, whatever the target variable's shape.
    kwargs["shape"] = ()
    loc_var = getter(name + "_prior_loc", *args, **kwargs)
    # Overwrite the forwarded initializer so the raw scale starts small.
    kwargs["initializer"] = scale_variable_initializer(0.01)
    scale_var = getter(name + "_prior_scale", *args, **kwargs)
    # softplus keeps the learned scale strictly positive.
    prior = tfp.distributions.Normal(loc=loc_var, scale=tf.nn.softplus(scale_var), name="{}_prior_dist".format(name))
    return prior
def get_config_value(self, section, name=None, config_file=None):
    """Returns configuration value for a given [``section``] and ``name``.

    :param section: Section we want to retrieve value from
    :param name: Name of configuration we want to retrieve
    :param config_file: A path to file which should be used to retrieve
        configuration from (might also be a list of file paths)
    """
    # Normalize config_file into a list of paths without rebinding the
    # parameter in place.
    if config_file is None:
        paths = []
    elif isinstance(config_file, basestring):
        paths = [config_file]
    else:
        paths = config_file
    config = self._repo.ui
    for config_path in paths:
        config.readconfig(config_path)
    return config.config(section, name)
def hex_timestamp_to_datetime(hex_timestamp):
    """Converts a hex timestamp to a (local-time) datetime object.

    >>> hex_timestamp_to_datetime('558BBCF9')
    datetime.datetime(2015, 6, 25, 8, 34, 1)
    >>> hex_timestamp_to_datetime('0x558BBCF9')
    datetime.datetime(2015, 6, 25, 8, 34, 1)
    >>> datetime.fromtimestamp(0x558BBCF9)
    datetime.datetime(2015, 6, 25, 8, 34, 1)
    """
    # int() with an explicit base of 16 already tolerates an optional
    # '0x'/'0X' prefix, so no manual prefix handling is required.  (The
    # original prepended '0x' itself and therefore crashed on an
    # uppercase '0X' prefix.)
    return datetime.fromtimestamp(int(hex_timestamp, 16))
def show_network(self, network, **_params):
    """Fetches information of a certain network.

    :param network: network identifier interpolated into ``network_path``
    :param _params: extra query parameters forwarded to the GET request
    :return: result of the GET call on the network resource path
    """
    return self.get(self.network_path % (network), params=_params)
def auprc(y_true, y_pred):
    """Area under the precision-recall curve.

    NaN-masked entries are removed from both arrays before the curve is
    computed.

    :param y_true: ground-truth binary labels
    :param y_pred: predicted scores
    :return: area under the PR curve (``skm.auc`` over recall/precision)
    """
    y_true, y_pred = _mask_value_nan(y_true, y_pred)
    precision, recall, _ = skm.precision_recall_curve(y_true, y_pred)
    return skm.auc(recall, precision)
def range_is_obj(rng, rdfclass):
    """Test to see if the range for the class should be an object or a literal.

    Returns True only when ``rng`` names a generated RDF class in the
    ``rdfclass`` module that is not literal-typed; returns False for
    'rdfs_Literal', literal subclasses, and unknown names.

    :param rng: range name, e.g. 'rdfs_Literal' or a class name
    :param rdfclass: module holding the generated RDF class definitions
    """
    if rng == 'rdfs_Literal':
        return False
    if hasattr(rdfclass, rng):
        mod_class = getattr(rdfclass, rng)
        # If any declared rdf_type resolves to a literal subclass, the range
        # is a literal, not an object.
        for item in mod_class.cls_defs['rdf_type']:
            try:
                if issubclass(getattr(rdfclass, item), rdfclass.rdfs_Literal):
                    return False
            except AttributeError:
                # rdf_type entry without a matching class in the module: skip
                pass
        # A proper generated RDF class counts as an object range.
        if isinstance(mod_class, rdfclass.RdfClassMeta):
            return True
    return False
def content(self, path=None, overwrite=True, encoding='utf-8'):
    """Download the file and return its text content.

    Downloads to the specified path, or to a temporary file when no path
    is given, then reads the whole file into memory.  Should not be used
    on very large files.

    :param path: Path for file download. If omitted a tmp file will be used.
    :param overwrite: Overwrite file if it exists locally
    :param encoding: File encoding, by default it is UTF-8
    :return: File content.
    """
    def _download_and_read(local_path):
        # One place for the download-then-read sequence used by both paths.
        self.download(wait=True, path=local_path, overwrite=overwrite)
        with io.open(local_path, 'r', encoding=encoding) as handle:
            return handle.read()

    if path:
        return _download_and_read(path)
    with tempfile.NamedTemporaryFile() as tmp:
        return _download_and_read(tmp.name)
def fixminimized(self, alphabet):
    """After pyfst minimization, all unused arcs are removed, and all sink
    states are removed.  However this may break compatibility.

    Re-adds a single non-accepting sink state and routes every missing
    (state, character) transition to it, making the automaton complete
    again.

    Args:
        alphabet (list): The input alphabet
    Returns:
        None
    """
    # Sink state id: one past the current last state.
    endstate = len(list(self.states))
    for state in self.states:
        for char in alphabet:
            # Does this state already have an outgoing arc on `char`?
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                # Complete the automaton: missing transition goes to the sink.
                self.add_arc(state.stateid, endstate, char)
    # Sink is non-accepting (infinite weight in the tropical semiring).
    self[endstate].final = TropicalWeight(float('inf'))
    # The sink loops to itself on every input symbol.
    for char in alphabet:
        self.add_arc(endstate, endstate, char)
def query_disease_comment():
    """Returns list of disease comments by query parameters
    tags:
      - Query functions
    parameters:
      - name: comment
        in: query
        type: string
        required: false
        description: Comment on disease linked to UniProt entry
        default: '%mutations%'
      - name: entry_name
        in: query
        type: string
        required: false
        description: reference identifier
        default: A4_HUMAN
      - name: limit
        in: query
        type: integer
        required: false
        description: limit of results numbers
        default: 10
    """
    # Whitelist the accepted query-string arguments before forwarding them
    # to the database query layer.
    args = get_args(request_args=request.args, allowed_str_args=['comment', 'entry_name'], allowed_int_args=['limit'])
    return jsonify(query.disease_comment(**args))
def invers(self):
    """Return the inverse matrix, if it can be calculated.

    :return: Returns a new Matrix containing the inverse
    :rtype: Matrix
    :raise: Raises a :py:exc:`ValueError` if the matrix is not invertible
    :note: Only a squared matrix with a determinant != 0 can be inverted.
    :todo: Reduce amount of create and copy operations
    """
    if self._columns != self._rows:
        raise ValueError("A square matrix is needed")
    mArray = self.get_array(False)
    appList = [0] * self._columns
    # add identity matrix to array in order to use gauss jordan algorithm
    for col in xrange(self._columns):
        # appList[:] -> a fresh row copy, not a shared reference
        mArray.append(appList[:])
        mArray[self._columns + col][col] = 1
    # create new Matrix and execute gauss jordan algorithm
    exMatrix = Matrix.from_two_dim_array(2 * self._columns, self._rows, mArray)
    gjResult = exMatrix.gauss_jordan()
    # remove identity matrix from left side
    # TODO Implement slicing directly for Matrix
    gjResult.matrix = gjResult.matrix[self._columns:]
    gjResult._columns = len(gjResult.matrix)
    return gjResult
def network_sampling(n, filename, directory=None, snowball=False, user=None):
    """Selects a few users and exports a CSV of indicators for them.

    TODO: Returns the network/graph between the selected users.

    Parameters
    ----------
    n : int
        Number of users to select.
    filename : string
        File to export to.
    directory : string
        Directory to select users from if using the default random selection.
    snowball : bool
        Starts from a specified user, iterates over neighbors, and does a
        BFS until n users are reached.
    user
        Starting user for the snowball sampling.
    """
    if snowball:
        if user is None:
            raise ValueError("Must specify a starting user from whom to initiate the snowball")
        else:
            users, agenda = [user], [user]
            while len(agenda) > 0:
                # pop(0) gives FIFO order, i.e. an actual BFS as documented.
                # (The original popped from the end - LIFO/DFS - and called
                # the nonexistent list.push(), which raised AttributeError.)
                parent = agenda.pop(0)
                dealphebetized_network = sorted(parent.network.items(), key=lambda k: random.random())
                for neighbor in dealphebetized_network:
                    if neighbor[1] not in users and neighbor[1] is not None and len(users) < n:
                        users.append(neighbor[1])
                        if neighbor[1].network:
                            agenda.append(neighbor[1])
    else:
        files = [x for x in os.listdir(directory) if os.path.isfile(os.path.join(directory, x))]
        # Sorting by a random key is a shuffle.
        shuffled_files = sorted(files, key=lambda k: random.random())
        user_names = shuffled_files[:n]
        users = [bc.read_csv(u[:-4], directory) for u in user_names]
    if len(users) < n:
        raise ValueError("Specified more users than records that exist, only {} records available".format(len(users)))
    bc.to_csv([bc.utils.all(u) for u in users], filename)
def execute(self, query, params=None, cursor=None):
    """Execute query in pool.

    Returns future yielding closed cursor.
    You can get rows, lastrowid, etc from the cursor.

    :param query: SQL statement to execute
    :param params: parameters bound to the query, if any
    :param cursor: cursor class (Cursor, DictCursor, etc.)
    :return: Future of cursor
    :rtype: Future
    """
    conn = yield self._get_conn()
    try:
        cur = conn.cursor(cursor)
        yield cur.execute(query, params)
        yield cur.close()
    except:
        # On any failure drop the (possibly broken) connection instead of
        # returning it to the pool, then re-raise to the caller.
        self._close_conn(conn)
        raise
    else:
        # Healthy connection goes back into the pool.
        self._put_conn(conn)
        # Tornado-style coroutine return of the closed cursor.
        raise Return(cur)
def add_state_editor(self, state_m):
    """Triggered whenever a state is selected.

    Either re-attaches a previously closed editor tab for the state (kept
    in ``self.closed_tabs``) or creates a fresh ``StateEditorController``,
    then prepends a notebook page for it and records the tab bookkeeping
    in ``self.tabs``.

    :param state_m: The selected state model.
    :return: id of the newly prepended notebook page
    """
    state_identifier = self.get_state_identifier(state_m)
    if state_identifier in self.closed_tabs:
        # Re-use the editor that was kept around when the tab was closed.
        state_editor_ctrl = self.closed_tabs[state_identifier]['controller']
        state_editor_view = state_editor_ctrl.view
        handler_id = self.closed_tabs[state_identifier]['source_code_changed_handler_id']
        source_code_view_is_dirty = self.closed_tabs[state_identifier]['source_code_view_is_dirty']
        del self.closed_tabs[state_identifier]
        # pages not in self.closed_tabs and self.tabs at the same time
    else:
        state_editor_view = StateEditorView()
        if isinstance(state_m, LibraryStateModel):
            # Library states open on the "Data Linkage" page by default.
            state_editor_view['main_notebook_1'].set_current_page(state_editor_view['main_notebook_1'].page_num(state_editor_view.page_dict["Data Linkage"]))
        state_editor_ctrl = StateEditorController(state_m, state_editor_view)
        self.add_controller(state_identifier, state_editor_ctrl)
        if state_editor_ctrl.get_controller('source_ctrl') and state_m.state.get_next_upper_library_root_state() is None:
            # observe changed to set the mark dirty flag
            handler_id = state_editor_view.source_view.get_buffer().connect('changed', self.script_text_changed, state_m)
            self.view.get_top_widget().connect('draw', state_editor_view.source_view.on_draw)
        else:
            handler_id = None
        source_code_view_is_dirty = False
    (tab, inner_label, sticky_button) = create_tab_header('', self.on_tab_close_clicked, self.on_toggle_sticky_clicked, state_m)
    set_tab_label_texts(inner_label, state_m, source_code_view_is_dirty)
    # Stash widgets on the page so other handlers can update them later.
    state_editor_view.get_top_widget().title_label = inner_label
    state_editor_view.get_top_widget().sticky_button = sticky_button
    page_content = state_editor_view.get_top_widget()
    page_id = self.view.notebook.prepend_page(page_content, tab)
    page = self.view.notebook.get_nth_page(page_id)
    self.view.notebook.set_tab_reorderable(page, True)
    page.show_all()
    self.view.notebook.show()
    self.tabs[state_identifier] = {'page': page, 'state_m': state_m, 'controller': state_editor_ctrl, 'sm_id': self.model.selected_state_machine_id, 'is_sticky': False, 'source_code_view_is_dirty': source_code_view_is_dirty, 'source_code_changed_handler_id': handler_id}
    return page_id
def security_group_update(secgroup=None, auth=None, **kwargs):
    '''Update a security group

    secgroup
        Name, ID or Raw Object of the security group to update

    name
        New name for the security group

    description
        New description for the security group

    CLI Example:

    .. code-block:: bash

        salt '*' neutronng.security_group_update secgroup=secgroup1 description="Very secure security group"
        salt '*' neutronng.security_group_update secgroup=secgroup1 description="Very secure security group" project_id=1dcac318a83b4610b7a7f7ba01465548
    '''
    cloud = get_operator_cloud(auth)
    # keep_name=True: here 'name' is the new value for the group rather than
    # the usual resource-lookup key, so it must survive kwarg cleaning.
    kwargs = _clean_kwargs(keep_name=True, **kwargs)
    return cloud.update_security_group(secgroup, **kwargs)
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
    """Instantiate a PreTrainedBertModel from a pre-trained model file.

    Download and cache the pre-trained model file if needed.

    :param pretrained_model_name_or_path: a key of
        ``PRETRAINED_VOCAB_ARCHIVE_MAP``, or a path/URL to a vocab file
        (or a directory containing one).
    :param cache_dir: optional cache directory for downloaded files.
    :param inputs: forwarded positionally to the tokenizer constructor.
    :param kwargs: forwarded to the tokenizer constructor
        (``do_lower_case`` and ``max_len`` may be adjusted below).
    :return: the tokenizer instance, or None when the vocab file cannot
        be resolved.
    """
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
        vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
        # Keep the casing behavior consistent with the chosen pretrained model.
        if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
            logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                           "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                           "you may want to check this behavior.")
            kwargs['do_lower_case'] = False
        elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
            logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                           "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                           "but you may want to check this behavior.")
            kwargs['do_lower_case'] = True
    else:
        vocab_file = pretrained_model_name_or_path
    if os.path.isdir(vocab_file):
        vocab_file = os.path.join(vocab_file, VOCAB_NAME)
    # redirect to the cache, if necessary
    try:
        resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
    except EnvironmentError:
        logger.error("Model name '{}' was not found in model name list ({}). "
                     "We assumed '{}' was a path or url but couldn't find any file "
                     "associated to this path or url.".format(pretrained_model_name_or_path, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file))
        return None
    if resolved_vocab_file == vocab_file:
        logger.info("loading vocabulary file {}".format(vocab_file))
    else:
        logger.info("loading vocabulary file {} from cache at {}".format(vocab_file, resolved_vocab_file))
    if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
        # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
        # than the number of positional embeddings
        max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
        kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
    # Instantiate tokenizer.
    tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
    return tokenizer
def download_file(self, remote, local):
    """Downloads a file.

    :param remote: remote file name
    :param local: local file name
    :return: None
    """
    # The context manager guarantees the handle is closed even when the
    # download raises (replaces the manual open/try/finally/close).
    with open(local, 'wb') as handle:
        self.download(remote, handle)
def eigb(A, y0, eps, rmax=150, nswp=20, max_full_size=1000, verb=1):
    """Approximate computation of minimal eigenvalues in tensor train format

    This function uses alternating least-squares algorithm for the
    computation of several minimal eigenvalues. If you want maximal
    eigenvalues, just send -A to the function.

    :Reference:
        S. V. Dolgov, B. N. Khoromskij, I. V. Oseledets, and D. V. Savostyanov.
        Computation of extreme eigenvalues in higher dimensions using block
        tensor train format. Computer Phys. Comm., 185(4):1207-1216, 2014.
        http://dx.doi.org/10.1016/j.cpc.2013.12.017

    :param A: Matrix in the TT-format
    :type A: matrix
    :param y0: Initial guess in the block TT-format, r(d+1) is the number of
        eigenvalues sought
    :type y0: tensor
    :param eps: Accuracy required
    :type eps: float
    :param rmax: Maximal rank
    :type rmax: int
    :param nswp: Maximal number of ALS sweeps
    :type nswp: int
    :param max_full_size: local problems up to this size are solved directly
    :type max_full_size: int
    :param verb: verbosity level
    :rtype: A tuple (tensor, ev), where tensor is an approximation to the
        eigenvectors and ev is a list of eigenvalues (note: the code returns
        the tensor first, matching ``sol, ev = eigb(...)`` usage).

    :Example:

        >>> import tt
        >>> import tt.eigb
        >>> d = 8; f = 3
        >>> r = [8] * (d * f + 1); r[d * f] = 8; r[0] = 1
        >>> x = tt.rand(n, d * f, r)
        >>> a = tt.qlaplace_dd([8, 8, 8])
        >>> sol, ev = tt.eigb.eigb(a, x, 1e-6, verb=0)
    """
    ry = y0.r.copy()
    # The Fortran core performs the block ALS sweeps; it mutates ry in place
    # and leaves the eigenvector cores in tt_eigb.tt_block_eig.result_core.
    lam = tt_eigb.tt_block_eig.tt_eigb(y0.d, A.n, A.m, A.tt.r, A.tt.core, y0.core, ry, eps, rmax, ry[y0.d], 0, nswp, max_full_size, verb)
    y = tensor()
    y.d = y0.d
    y.n = A.n.copy()
    y.r = ry
    # Copy the result out of the Fortran module before deallocating it.
    y.core = tt_eigb.tt_block_eig.result_core.copy()
    tt_eigb.tt_block_eig.deallocate_result()
    y.get_ps()
    return y, lam
def parse(self, response, metadata_type):
    """Parses RETS metadata using the STANDARD-XML format.

    :param response: requests Response object holding the metadata XML
    :param metadata_type: string, e.g. 'METADATA-SYSTEM' or 'METADATA-CLASS'
    :return parsed: list of metadata dicts (always a list, even when the
        server returned a single entry)
    :raises ParseError: on missing expected keys or an unknown metadata type
    """
    xml = xmltodict.parse(response.text)
    # Raises on RETS-level error reply codes before inspecting the body.
    self.analyze_reply_code(xml_response_dict=xml)
    base = xml.get('RETS', {}).get('METADATA', {}).get(metadata_type, {})
    if metadata_type == 'METADATA-SYSTEM':
        # Servers differ on capitalization: 'System' vs 'SYSTEM'.
        syst = base.get('System', base.get('SYSTEM'))
        if not syst:
            raise ParseError("Could not get the System key from a METADATA-SYSTEM request.")
        system_obj = {}
        if syst.get('SystemID'):
            system_obj['system_id'] = str(syst['SystemID'])
        if syst.get('SystemDescription'):
            system_obj['system_description'] = str(syst['SystemDescription'])
        if syst.get('Comments'):
            system_obj['comments'] = syst['Comments']
        if base.get('@Version'):
            system_obj['version'] = base['@Version']
        return [system_obj]
    elif metadata_type == 'METADATA-CLASS':
        key = 'class'
    elif metadata_type == 'METADATA-RESOURCE':
        key = 'resource'
    elif metadata_type == 'METADATA-LOOKUP_TYPE':
        key = 'lookuptype'
    elif metadata_type == 'METADATA-OBJECT':
        key = 'object'
    elif metadata_type == 'METADATA-TABLE':
        key = 'field'
    else:
        msg = "Got an unknown metadata type of {0!s}".format(metadata_type)
        raise ParseError(msg)
    # Get the version with the right capitalization from the dictionary
    key_cap = None
    for k in base.keys():
        if k.lower() == key:
            key_cap = k
    if not key_cap:
        msg = 'Could not find {0!s} in the response XML'.format(key)
        raise ParseError(msg)
    # Normalize: a single entry comes back as a dict, several as a list.
    if isinstance(base[key_cap], list):
        return base[key_cap]
    else:
        return [base[key_cap]]
def strip_ansi(text, c1=False, osc=False):
    '''Strip ANSI escape sequences from a portion of text.

    https://stackoverflow.com/a/38662876/450917

    Arguments:
        text: str - the text to strip.
        osc: bool - include OSC commands in the strippage.
        c1: bool - include C1 commands in the strippage.

    Notes:
        Enabling c1 and osc stripping is less efficient and the two options
        can mildly conflict with one another.
        The less problematic order was chosen, so there may still be rare
        C1 OSC fragments left over.
    '''
    # 7-bit CSI sequences are always stripped.
    text = ansi_csi0_finder.sub('', text)
    if osc:
        # 7-bit OSC goes first - less destructive than the C1 pass below.
        text = ansi_osc0_finder.sub('', text)
    if c1:
        text = ansi_csi1_finder.sub('', text)
    if osc:
        # 8-bit OSC runs after the C1 pass (see Notes about ordering).
        text = ansi_osc1_finder.sub('', text)
    return text
def _load_cell(args, cell_body):
    """Implements the BigQuery load magic used to load data from GCS to a table.

    The supported syntax is:

        %bq load <optional args>

    Args:
      args: the arguments following '%bq load'.
      cell_body: optional contents of the cell interpreted as YAML or JSON.
    Returns:
      A message about whether the load succeeded or failed.
    """
    env = google.datalab.utils.commands.notebook_environment()
    config = google.datalab.utils.commands.parse_config(cell_body, env, False) or {}
    parameters = config.get('parameters') or []
    if parameters:
        jsonschema.validate({'parameters': parameters}, BigQuerySchema.QUERY_PARAMS_SCHEMA)
    # The table name may contain parameter references that need resolving.
    name = google.datalab.bigquery.Query.resolve_parameters(args['table'], parameters)
    table = _get_table(name)
    if not table:
        table = bigquery.Table(name)
    if args['mode'] == 'create':
        if table.exists():
            raise Exception('table %s already exists; use "append" or "overwrite" as mode.' % name)
        if not cell_body or 'schema' not in cell_body:
            raise Exception('Table does not exist, and no schema specified in cell; cannot load.')
        schema = config['schema']
        # schema can be an instance of bigquery.Schema.
        # For example, user can run "my_schema = bigquery.Schema.from_data(df)" in a previous cell and
        # specify "schema: $my_schema" in cell input.
        if not isinstance(schema, bigquery.Schema):
            jsonschema.validate({'schema': schema}, BigQuerySchema.TABLE_SCHEMA_SCHEMA)
            schema = bigquery.Schema(schema)
        table.create(schema=schema)
    elif not table.exists():
        raise Exception('table %s does not exist; use "create" as mode.' % name)
    csv_options = bigquery.CSVOptions(delimiter=args['delimiter'], skip_leading_rows=args['skip'], allow_jagged_rows=not args['strict'], quote=args['quote'])
    # The GCS path may also contain parameter references.
    path = google.datalab.bigquery.Query.resolve_parameters(args['path'], parameters)
    job = table.load(path, mode=args['mode'], source_format=args['format'], csv_options=csv_options, ignore_unknown_values=not args['strict'])
    if job.failed:
        raise Exception('Load failed: %s' % str(job.fatal_error))
    elif job.errors:
        raise Exception('Load completed with errors: %s' % str(job.errors))
def _lookup_user_data(self, *args, **kwargs):
    """Generic function for looking up values in a user-specific dictionary.

    Use as follows::

        _lookup_user_data('path', 'to', 'desired', 'value', 'in', 'dictionary',
                          default=<default value>,
                          data_kind='customization'/'saved_searches')

    The ``default`` keyword is required; its type also acts as a type check
    on the looked-up value (a mismatch returns the default).
    """
    user_data = self.get_user_data()
    # pop() both reads and removes 'data_kind' so it is not forwarded to
    # get_dict() - replaces the get() + del/try-except dance, and drops the
    # Python-2-only `except KeyError, err` syntax.
    data_kind = kwargs.pop('data_kind', 'customization')
    default_value = kwargs['default']
    result = get_dict(user_data, data_kind, *args, **kwargs)
    # Stored values may be numeric strings; coerce them back to int.
    try:
        result = int(result)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` - anything else should propagate.
        pass
    if not isinstance(result, default_value.__class__):
        return default_value
    else:
        return result
def writeRecord(self, f):
    """Write this record to the open file ``f`` in Pubmed MEDLINE tag format.

    This is nearly identical to the original; the FAU tag is the only tag
    not written in the same place - doing so would require changing the
    parser and lots of extra logic.

    :param f: a writable, text-mode file object
    :raises BadPubmedRecord: when the record was flagged as malformed on parse
    """
    if self.bad:
        raise BadPubmedRecord("This record cannot be converted to a file as the input was malformed.\nThe original line number (if any) is: {} and the original file is: '{}'".format(self._sourceLine, self._sourceFile))
    else:
        # Group author-scoped tag values by their author key so they can be
        # emitted right after the matching AU line below.
        authTags = {}
        for tag in authorBasedTags:
            for val in self._fieldDict.get(tag, []):
                split = val.split(' : ')
                try:
                    authTags[split[0]].append("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), ' : '.join(split[1:]).replace('\n', '\n      ')))
                except KeyError:
                    authTags[split[0]] = ["{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), ' : '.join(split[1:]).replace('\n', '\n      '))]
        for tag, value in self._fieldDict.items():
            if tag in authorBasedTags:
                # These are emitted next to their author's AU line instead.
                continue
            else:
                for v in value:
                    f.write("{0}{1}- {2}\n".format(tag, ' ' * (4 - len(tag)), v.replace('\n', '\n      ')))
                    if tag == 'AU':
                        for authVal in authTags.get(v, []):
                            f.write(authVal)
def parameters(self, namespaced=False):
    """Returns the exception's varlink error parameters.

    :param namespaced: when True, the parameters are returned as nested
        ``SimpleNamespace`` objects instead of plain dicts.
    """
    payload = self.args[0]
    if not namespaced:
        return payload.get('parameters')
    # JSON round-trip with an object_hook converts every dict level into a
    # SimpleNamespace.
    return json.loads(json.dumps(payload['parameters']), object_hook=lambda d: SimpleNamespace(**d))
def batch_eval(self, exprs, n, extra_constraints=(), solver=None, model_callback=None):
    """Evaluate one or multiple expressions.

    :param exprs: A list of expressions to evaluate.
    :param n: Number of different solutions to return.
    :param extra_constraints: Extra constraints (as ASTs) to add to the
        solver for this solve.
    :param solver: A solver object, native to the backend, to assist in
        the evaluation.
    :param model_callback: a function that will be executed with recovered
        models (if any).
    :return: A list of up to n tuples, where each tuple is a solution for
        all expressions.
    """
    if solver is None and self._solver_required:
        raise BackendError("%s requires a solver for batch evaluation" % self.__class__.__name__)
    converted = [self.convert(expression) for expression in exprs]
    return self._batch_eval(
        converted,
        n,
        extra_constraints=self.convert_list(extra_constraints),
        solver=solver,
        model_callback=model_callback,
    )
def move_by_offset(self, xoffset, yoffset):
    """Moving the mouse to an offset from current mouse position.

    :Args:
     - xoffset: X offset to move to, as a positive or negative integer.
     - yoffset: Y offset to move to, as a positive or negative integer.

    :return: self, so calls can be chained
    """
    if self._driver.w3c:
        self.w3c_actions.pointer_action.move_by(xoffset, yoffset)
        # keep the key-action queue in lockstep with the pointer queue
        self.w3c_actions.key_action.pause()
    else:
        # legacy JSON wire protocol: queue the command until perform()
        self._actions.append(lambda: self._driver.execute(Command.MOVE_TO, {'xoffset': int(xoffset), 'yoffset': int(yoffset)}))
    return self
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.