signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _handle_response ( self , response ) :
"""Internal helper for handling API responses from the Binance server .
Raises the appropriate exceptions when necessary ; otherwise , returns the
response .""" | if not str ( response . status_code ) . startswith ( '2' ) :
raise BinanceAPIException ( response )
try :
return response . json ( )
except ValueError :
raise BinanceRequestException ( 'Invalid Response: %s' % response . text ) |
def decorate_method(cls, func):
    """Wrap an endpoint function so every call is tracked as an ApiCall.

    :param func: func to be decorated
    :return: func that is now decorated
    """
    # Positional parameter names (minus 'self'), used to fold *args into kwargs.
    func_args = [arg for arg in function_arguments(func) if arg != 'self']
    method_return_types = Endpoint._parse_function_return_types_from_doc(func.__doc__)
    # Fully qualified call name, e.g. "<endpoint path>.<function name>".
    name = '%s.%s' % (cls.path, func.__name__)

    @wraps(func)
    def method_decorator(self, *args, **kwargs):
        # Map positional arguments to their names so the pre-processor sees
        # every endpoint parameter as a keyword.
        for i in range(len(args)):
            kwargs[func_args[i]] = args[i]
        api_call = self.connection._pre_process_call(name, endpoint_params=kwargs)
        try:
            # NOTE(review): func is invoked without `self`; presumably it is
            # already bound or stateless -- confirm.
            data = func(**kwargs)
        except RestException as e:
            # Known REST failure: record it on the call and propagate.
            api_call.error = e
            raise e
        except Exception as e:
            # Unexpected failure: drop the call from this thread's queue and
            # re-raise wrapped as a RestException with the traceback attached.
            call_queue = self.connection._call_queue.get(self.connection._get_thread_id(), [])
            if api_call in call_queue:
                call_queue.remove(api_call)
            e = RestException(original_error=e, stack=traceback.format_exc())
            log.error('ApiCall Exception: %s' % e, exc_info=True)
            raise e
        return self.connection._post_process_call(api_call, data, method_return_types)

    # Marker flag so other code can recognize decorated REST methods.
    method_decorator.rest_method = True
    return method_decorator
def workflow_start(obj, queue, keep_data, name, workflow_args):
    """Send a workflow to the queue.

    NAME: The name of the workflow that should be started.
    WORKFLOW_ARGS: Workflow arguments in the form key1=value1 key2=value2.
    """
    try:
        # Each "key=value" argument is split once and collected into the
        # workflow's data store arguments.
        start_workflow(name=name, config=obj['config'], queue=queue, clear_data_store=not keep_data, store_args=dict([arg.split('=', maxsplit=1) for arg in workflow_args]))
    except (WorkflowArgumentError, WorkflowImportError) as e:
        # Argument or import problems: report the error message to the user.
        click.echo(_style(obj['show_color'], 'An error occurred when trying to start the workflow', fg='red', bold=True))
        click.echo('{}'.format(e))
    except WorkflowDefinitionError as e:
        # The workflow's DAG definition contains a cycle.
        click.echo(_style(obj['show_color'], 'The graph {} in workflow {} is not a directed acyclic graph'.format(e.graph_name, e.workflow_name), fg='red', bold=True))
def check(cls):
    """Check every setting declared on this class.

    Collects the failure message of each setting whose ``check()`` raises
    and reports them all at once via ``ImproperlyConfigured``.
    """
    # The abstract base class itself has nothing to validate.
    if cls == AppSettings:
        return None
    problems = []
    for setting in cls.settings.values():
        try:
            setting.check()
        # pylama:ignore=W0703
        except Exception as err:
            # Keep collecting so every misconfiguration is reported together.
            problems.append(str(err))
    if problems:
        raise ImproperlyConfigured("\n".join(problems))
def setMaximum(self, maximum):
    """Set the ``_maximum`` attribute.

    Args:
        maximum (int): new ``_maximum`` value.

    Raises:
        TypeError: if ``maximum`` is not an int.
    """
    if isinstance(maximum, int):
        self._maximum = maximum
    else:
        raise TypeError("Argument is not of type int or long")
def _process_backlogged_hosting_devices(self, context):
    """Process currently backlogged devices.

    Go through the currently backlogged devices and process them.
    For devices which are now reachable (compared to last time), we call
    ``process_services()`` passing the now reachable device's id.
    For devices which have passed the ``hosting_device_dead_timeout`` and
    hence presumed dead, execute a RPC to the plugin informing that.

    Heartbeat revision:
      * res['reachable'] - hosting device went from Unknown to Active:
        process_services(...)
      * res['revived'] - hosting device went from Dead to Active:
        inform the device manager that the hosting device is now responsive
      * res['dead'] - hosting device went from Unknown to Dead:
        inform the device manager that the hosting device is non-responding

    Additional note for the revived case: although the plugin was notified,
    there may be some lag before the plugin can actually reschedule its
    backlogged routers.  If ``process_services(device_ids...)`` isn't
    successful initially, subsequent device syncs will be attempted until
    MAX_DEVICE_SYNC_ATTEMPTS occurs.  The main process_service task will
    resume if sync_devices is populated.

    :param context: RPC context
    :return: None
    """
    driver_mgr = self.get_routing_service_helper().driver_manager
    res = self._dev_status.check_backlogged_hosting_devices(driver_mgr)
    if res['reachable']:
        # Devices that transitioned to reachable: process their services now.
        self.process_services(device_ids=res['reachable'])
    if res['revived']:
        LOG.debug("Reporting revived hosting devices: %s " % res['revived'])
        # trigger a sync only on the revived hosting-devices
        if self.conf.cfg_agent.enable_heartbeat is True:
            self.devmgr_rpc.report_revived_hosting_devices(context, hd_ids=res['revived'])
            self.process_services(device_ids=res['revived'])
    if res['dead']:
        # Presumed-dead devices are reported to the device manager plugin.
        LOG.debug("Reporting dead hosting devices: %s", res['dead'])
        self.devmgr_rpc.report_dead_hosting_devices(context, hd_ids=res['dead'])
def get_on_tmdb(uri, **kwargs):
    """Fetch a resource from TMDB and return the decoded JSON payload.

    Extra keyword arguments are forwarded as query parameters; the
    configured API key is always added.
    """
    params = dict(kwargs, api_key=app.config['TMDB_API_KEY'])
    url = (TMDB_API_URL + uri).encode('utf8')
    response = requests_session.get(url, params=params)
    response.raise_for_status()
    return json.loads(response.text)
def update_trackers(self):
    """Update the denormalized trackers associated with the forum instance."""
    # Approved topics, newest post first, so index 0 holds the latest topic.
    direct_approved_topics = self.topics.filter(approved=True).order_by('-last_post_on')
    # Compute the direct topics count and the direct posts count.
    self.direct_topics_count = direct_approved_topics.count()
    self.direct_posts_count = direct_approved_topics.aggregate(total_posts_count=Sum('posts_count'))['total_posts_count'] or 0
    # Force the forum's 'last_post' ID and 'last_post_on' date to the
    # corresponding values associated with the topic with the latest post.
    if direct_approved_topics.exists():
        self.last_post_id = direct_approved_topics[0].last_post_id
        self.last_post_on = direct_approved_topics[0].last_post_on
    else:
        self.last_post_id = None
        self.last_post_on = None
    # Any save of a forum triggered from the update_tracker process will not
    # result in checking for a change of the forum's parent.
    self._simple_save()
def main(search_engine, search_option, list_engines, query):
    """Quick search command tool for your terminal."""
    engine_data = {}
    # --list mode: print each configured engine (and its extra options), then exit.
    if list_engines:
        for name in engines:
            conf = get_config(name)
            optionals = filter(lambda e: e != 'default', conf.keys())
            # NOTE(review): under Python 3 filter() returns a lazy object that
            # is always truthy, so the plain-name branch below may be
            # unreachable -- confirm intended Python version.
            if optionals:
                click.echo('{command} -o {options}'.format(command=name.replace('.json', ''), options=', '.join(optionals)))
            else:
                click.echo(name.replace('.json', ''))
        sys.exit(0)
    # Resolve the engine by prefix match against the config file names.
    for name in engines:
        if name.find(search_engine) == 0:
            engine_data = get_config(name)
            break
    # read from standard input if available
    if not sys.stdin.isatty():
        query = sys.stdin.read()
    if not query:
        exit_with_error('Query parameter is missing.')
    if not engine_data:
        exit_with_error('Engine ``{0}`` not found'.format(search_engine))
    if search_option not in engine_data:
        exit_with_error('Option ``{0}`` not available for engine ``{1}``'.format(search_option, search_engine))
    # Click passes a multi-word query as a tuple; join it into one string.
    query = u' '.join(query) if isinstance(query, tuple) else query
    engine_url = engine_data.get(search_option)
    url = engine_url.format(query).encode('utf-8')
    launch.open(url)
def apply(self, strain, detector_name, f_lower=None, distance_scale=1):
    """Add injections (as seen by a particular detector) to a time series.

    Parameters
    ----------
    strain : TimeSeries
        Time series to inject signals into, of type float32 or float64.
    detector_name : string
        Name of the detector used for projecting injections.
    f_lower : {None, float}, optional
        Low-frequency cutoff for injected signals. If None, use value
        provided by each injection.
    distance_scale : {1, float}, optional
        Factor to scale the distance of an injection with. The default is
        no scaling.

    Returns
    -------
    None

    Raises
    ------
    TypeError
        For invalid types of `strain`.

    NOTE(review): ``f_lower``, ``distance_scale`` and ``detector_name`` are
    not used in the body below -- confirm whether that is intended for this
    (sine-Gaussian burst) variant.
    """
    if strain.dtype not in (float32, float64):
        raise TypeError("Strain dtype must be float32 or float64, not " + str(strain.dtype))
    lalstrain = strain.lal()
    # detector = Detector(detector_name)
    # Pad the segment by one Earth light-crossing time on each side.
    earth_travel_time = lal.REARTH_SI / lal.C_SI
    t0 = float(strain.start_time) - earth_travel_time
    t1 = float(strain.end_time) + earth_travel_time
    # pick lalsimulation injection function
    add_injection = injection_func_map[strain.dtype]
    for inj in self.table:
        # roughly estimate if the injection may overlap with the segment
        end_time = inj.get_time_geocent()
        # CHECK: This is a hack (10.0s); replace with an accurate estimate
        inj_length = 10.0
        eccentricity = 0.0
        polarization = 0.0
        start_time = end_time - 2 * inj_length
        if end_time < t0 or start_time > t1:
            continue
        # compute the waveform time series
        hp, hc = sim.SimBurstSineGaussian(float(inj.q), float(inj.frequency), float(inj.hrss), float(eccentricity), float(polarization), float(strain.delta_t))
        hp = TimeSeries(hp.data.data[:], delta_t=hp.deltaT, epoch=hp.epoch)
        hc = TimeSeries(hc.data.data[:], delta_t=hc.deltaT, epoch=hc.epoch)
        # Shift the waveform to the injection's geocentric end time.
        hp._epoch += float(end_time)
        hc._epoch += float(end_time)
        if float(hp.start_time) > t1:
            continue
        # compute the detector response, taper it if requested
        # and add it to the strain
        # NOTE(review): only the plus polarization is injected; hc is unused
        # beyond the epoch shift -- confirm.
        strain = wfutils.taper_timeseries(strain, inj.taper)
        signal_lal = hp.astype(strain.dtype).lal()
        add_injection(lalstrain, signal_lal, None)
    strain.data[:] = lalstrain.data.data[:]
def ellipticity2phi_q(e1, e2):
    """Convert ellipticity components to position angle and axis ratio.

    :param e1: first ellipticity component (scalar or numpy array)
    :param e2: second ellipticity component (scalar or numpy array)
    :return: tuple ``(phi, q)`` -- position angle in radians and axis
        ratio ``0 < q <= 1`` (same shape as the inputs)
    """
    phi = np.arctan2(e2, e1) / 2
    # Ellipticity modulus, capped at 0.999 to keep q strictly positive for
    # (near-)degenerate ellipses.  np.minimum works element-wise for arrays
    # as well as scalars, unlike the former scalar-only `if c > 0.999` test.
    c = np.minimum(np.sqrt(e1 ** 2 + e2 ** 2), 0.999)
    q = (1 - c) / (1 + c)
    return phi, q
def pos_tokenize(self: object, fileids: str):
    """Segment, tokenize and POS-tag a document in the corpus.

    Yields, for each paragraph, a list of POS-tagged sentences.
    """
    for paragraph in self.paras(fileids):
        tagged_sentences = []
        for sentence in sent_tokenize(paragraph):
            tagged_sentences.append(self.pos_tagger(word_tokenize(sentence)))
        yield tagged_sentences
def new_table(self, name, add_id=True, **kwargs):
    '''Add a table to the schema, or update it if it already exists.
    If updating, will only update data.'''
    from . import Table
    from .exc import NotFoundError
    # Look up the table; build a fresh one only when it doesn't exist yet.
    try:
        table = self.table(name)
        extant = True
    except NotFoundError:
        extant = False
        if 'sequence_id' not in kwargs:
            # Allocate the next sequence id for a brand-new table.
            kwargs['sequence_id'] = self._database.next_sequence_id(Dataset, self.vid, Table)
        table = Table(name=name, d_vid=self.vid, **kwargs)
        table.update_id()
    # Update possibly extant data: merge pre-existing data with the new
    # 'data' kwarg, new values winning.
    table.data = dict((list(table.data.items()) if table.data else []) + list(kwargs.get('data', {}).items()))
    # Copy remaining public kwargs onto the table, skipping identity and
    # structural fields that must not be overwritten this way.
    for key, value in list(kwargs.items()):
        if not key:
            continue
        if key[0] != '_' and key not in ['vid', 'id', 'id_', 'd_id', 'name', 'sequence_id', 'table', 'column', 'data']:
            setattr(table, key, value)
    if add_id:
        table.add_id_column()
    if not extant:
        self.tables.append(table)
    return table
def solar_longitude(day, orb=const.orb_present, days_per_year=None):
    """Estimate solar longitude from calendar day.

    Method is using an approximation from :cite:`Berger_1978` section 3
    (lambda = 0 at spring equinox).

    **Function-call arguments**

    :param array day: Indicator of time of year.
    :param dict orb: a dictionary with three members (as provided by
        :class:`~climlab.solar.orbital.OrbitalTable`)

        * ``'ecc'`` - eccentricity (dimensionless, default ``0.017236``)
        * ``'long_peri'`` - longitude of perihelion (precession angle)
          (degrees, default ``281.37``)
        * ``'obliquity'`` - obliquity angle (degrees, default ``23.446``)
    :param float days_per_year: number of days in a year (optional,
        default: 365.2422).  Reads the length of the year from
        :mod:`~climlab.utils.constants` if available.
    :returns: solar longitude ``lambda_long`` in dimension
        ``(day.size, ecc.size)``
    :rtype: array

    Works for both scalar and vector orbital parameters.
    """
    if days_per_year is None:
        days_per_year = const.days_per_year
    ecc = orb['ecc']
    long_peri_rad = deg2rad(orb['long_peri'])
    # Day 80 is (approximately) the spring equinox, where lambda is zero.
    delta_lambda = (day - 80.) * 2 * pi / days_per_year
    beta = sqrt(1 - ecc ** 2)
    # Mean longitude: series expansion in eccentricity (Berger 1978, sec. 3).
    lambda_long_m = -2 * ((ecc / 2 + (ecc ** 3) / 8) * (1 + beta) * sin(-long_peri_rad) - (ecc ** 2) / 4 * (1 / 2 + beta) * sin(-2 * long_peri_rad) + (ecc ** 3) / 8 * (1 / 3 + beta) * sin(-3 * long_peri_rad)) + delta_lambda
    # True longitude from the mean longitude (equation-of-center series).
    lambda_long = (lambda_long_m + (2 * ecc - (ecc ** 3) / 4) * sin(lambda_long_m - long_peri_rad) + (5 / 4) * (ecc ** 2) * sin(2 * (lambda_long_m - long_peri_rad)) + (13 / 12) * (ecc ** 3) * sin(3 * (lambda_long_m - long_peri_rad)))
    return lambda_long
def send_faucet_coins(address_to_fund, satoshis, api_key, coin_symbol='bcy'):
    '''Send yourself test coins on the bitcoin or blockcypher testnet.

    You can see your balance info at:
      - https://live.blockcypher.com/bcy/ for BCY
      - https://live.blockcypher.com/btc-testnet/ for BTC Testnet
    '''
    assert coin_symbol in ('bcy', 'btc-testnet')
    assert is_valid_address_for_coinsymbol(b58_address=address_to_fund, coin_symbol=coin_symbol)
    assert satoshis > 0
    assert api_key, 'api_key required'
    payload = {'address': address_to_fund, 'amount': satoshis}
    response = requests.post(
        make_url(coin_symbol, 'faucet'),
        json=payload,
        params={'token': api_key},
        verify=True,
        timeout=TIMEOUT_IN_SECONDS,
    )
    return get_valid_json(response)
def __request_start(self, queue_item):
    """Execute the request in the given queue item.

    Args:
        queue_item (:class:`nyawc.QueueItem`): The request/response pair to scrape.
    """
    # Ask the user callback what to do with this item.  A crashing callback
    # must never kill the crawler, so errors are printed and treated as None
    # (i.e. continue crawling).
    try:
        action = self.__options.callbacks.request_before_start(self.queue, queue_item)
    except Exception as e:
        action = None
        print(e)
        print(traceback.format_exc())
    if action == CrawlerActions.DO_STOP_CRAWLING:
        self.__should_stop = True
    if action == CrawlerActions.DO_SKIP_TO_NEXT:
        self.queue.move(queue_item, QueueItem.STATUS_FINISHED)
        self.__should_spawn_new_requests = True
    if action == CrawlerActions.DO_CONTINUE_CRAWLING or action is None:
        # Mark the item in progress and hand it to a daemon worker thread,
        # which calls __request_finish when done.
        self.queue.move(queue_item, QueueItem.STATUS_IN_PROGRESS)
        thread = CrawlerThread(self.__request_finish, self.__lock, self.__options, queue_item)
        self.__threads[queue_item.get_hash()] = thread
        thread.daemon = True
        thread.start()
def listar_por_ambiente(self, id_ambiente):
    """List all VLANs from an environment.

    **The items returned from the network API are kept as-is to stay
    compatible with other systems.**

    :param id_ambiente: Environment identifier.
    :return: Dictionary of the form::

        {'vlan': [{'id': <id_vlan>,
                   'nome': <nome_vlan>,
                   'num_vlan': <num_vlan>,
                   'ambiente': <id_ambiente>,
                   'descricao': <descricao>,
                   'acl_file_name': <acl_file_name>,
                   'acl_valida': <acl_valida>,
                   'acl_file_name_v6': <acl_file_name_v6>,
                   'acl_valida_v6': <acl_valida_v6>,
                   'ativada': <ativada>,
                   'id_tipo_rede': <id_tipo_rede>,
                   'rede_oct1': <rede_oct1>, ... 'rede_oct4': <rede_oct4>,
                   'bloco': <bloco>,
                   'mascara_oct1': <mascara_oct1>, ... 'mascara_oct4': <mascara_oct4>,
                   'broadcast': <broadcast>}, ... other vlans ...]}

    :raise InvalidParameterError: Environment id is none or invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_ambiente):
        raise InvalidParameterError(u'Environment id is none or invalid.')
    url = 'vlan/ambiente/%s/' % id_ambiente
    code, xml = self.submit(None, 'GET', url)
    return get_list_map(self.response(code, xml, ['vlan']), 'vlan')
def adjust(cols, light):
    """Create a palette from the extracted colors.

    The base colors are repeated for the bright slots, with white and black
    filling the foreground/background positions.
    """
    raw_colors = [cols[0]]
    raw_colors.extend(cols)
    raw_colors.extend(["#FFFFFF", "#000000"])
    raw_colors.extend(cols)
    raw_colors.append("#FFFFFF")
    return colors.generic_adjust(raw_colors, light)
def encode(in_bytes):
    """Encode a string using Consistent Overhead Byte Stuffing (COBS).

    Input is any byte string. Output is also a byte string.

    Encoding guarantees no zero bytes in the output. The output
    string will be expanded slightly, by a predictable amount.

    An empty string is encoded to '\\x01'.
    """
    if isinstance(in_bytes, str):
        raise TypeError('Unicode-objects must be encoded as bytes first')
    in_bytes_mv = _get_buffer_view(in_bytes)
    # final_zero tracks whether a trailing (possibly empty) group still has
    # to be emitted once the input is exhausted.
    final_zero = True
    out_bytes = bytearray()
    idx = 0
    search_start_idx = 0
    for in_char in in_bytes_mv:
        # NOTE(review): iterating a plain Python-3 memoryview yields ints, in
        # which case `in_char == b'\x00'` would never match; presumably
        # _get_buffer_view yields byte-sized slices -- confirm.
        if in_char == b'\x00':
            # Close the current group: a length byte, then the group's data.
            final_zero = True
            out_bytes.append(idx - search_start_idx + 1)
            out_bytes += in_bytes_mv[search_start_idx:idx]
            search_start_idx = idx + 1
        else:
            if idx - search_start_idx == 0xFD:
                # The group reached the 254-byte maximum: emit a full-length
                # (0xFF) block that implies no trailing zero.
                final_zero = False
                out_bytes.append(0xFF)
                out_bytes += in_bytes_mv[search_start_idx:idx + 1]
                search_start_idx = idx + 1
        idx += 1
    if idx != search_start_idx or final_zero:
        # Emit the final group (possibly empty, which encodes a trailing zero).
        out_bytes.append(idx - search_start_idx + 1)
        out_bytes += in_bytes_mv[search_start_idx:idx]
    return bytes(out_bytes)
def index(self):
    """Render the main page."""
    self._check_auth(must_admin=False)
    is_admin = self._check_admin()
    user = cherrypy.session.get(SESSION_KEY, None)
    # Without authentication there is no user to look up.
    user_attrs = None if self.auth_mode == 'none' else self._get_user(user)
    attrs_list = self.attributes.get_search_attributes()
    return self.temp['index.tmpl'].render(
        is_admin=is_admin,
        attrs_list=attrs_list,
        searchresult=user_attrs,
        notifications=self._empty_notification(),
    )
def google_storage(self, scene, path):
    """Download a scene from Google Storage.

    :param scene: The scene id
    :type scene: String
    :param path: The directory path to where the image should be stored
    :type path: String
    :returns: Boolean
    """
    satellite = self.scene_interpreter(scene)
    url = self.google_storage_url(satellite)
    # Fails early if the remote file is missing.
    self.remote_file_exists(url)
    self.output('Source: Google Storage', normal=True, arrow=True)
    return self.fetch(url, path)
def experiments_fmri_create(self, experiment_url, data_file):
    """Upload the given data file as the fMRI of the experiment at the given Url.

    Parameters
    ----------
    experiment_url : string
        Url for experiment resource
    data_file : string
        Absolute path to the functional data file on disk

    Returns
    -------
    scoserv.FunctionalDataHandle
        Handle to the created fMRI resource
    """
    # Get the experiment
    experiment = self.experiments_get(experiment_url)
    # Upload data
    FunctionalDataHandle.create(experiment.links[sco.REF_EXPERIMENTS_FMRI_CREATE], data_file)
    # Re-fetch the experiment so the returned handle reflects the new upload.
    return self.experiments_get(experiment_url).fmri_data
def validate_is_document_type(option, value):
    """Validate the type of method arguments that expect a MongoDB document."""
    if isinstance(value, (abc.MutableMapping, RawBSONDocument)):
        return
    raise TypeError("%s must be an instance of dict, bson.son.SON, "
                    "bson.raw_bson.RawBSONDocument, or "
                    "a type that inherits from "
                    "collections.MutableMapping" % (option,))
def iam(self):
    """Generate iam details.

    Each entry is the corresponding ``iam_<key>`` template from
    ``self.format``, rendered with ``self.data``.
    """
    keys = ('group', 'lambda_role', 'policy', 'profile', 'role', 'user', 'base')
    return {key: self.format['iam_' + key].format(**self.data) for key in keys}
def identify(self, req, resp, resource, uri_kwargs):
    """Identify a user from the ``Authorization`` header using Token auth.

    Returns the token value, or None when the header is absent or not a
    Token scheme.  Raises HTTPBadRequest for a malformed Token header.
    """
    header = req.get_header('Authorization', False)
    if not header:
        return None
    parts = header.split(' ')
    if parts[0].lower() != 'token':
        return None
    if len(parts) != 2:
        raise HTTPBadRequest(
            "Invalid Authorization header",
            "The Authorization header for Token auth should be in form:\n"
            "Authorization: Token <token_value>")
    return parts[1]
def delete_multireddit(self, name, *args, **kwargs):
    """Delete a Multireddit.

    Any additional parameters are passed directly into
    :meth:`~praw.__init__.BaseReddit.request`
    """
    url = self.config['multireddit_about'].format(user=self.user.name, multi=name)
    # The modhash isn't necessary for OAuth requests.
    if not self._use_oauth:
        self.http.headers['x-modhash'] = self.modhash
    try:
        self.request(url, data={}, method='DELETE', *args, **kwargs)
    finally:
        # Always remove the temporary header, even if the request failed.
        if not self._use_oauth:
            del self.http.headers['x-modhash']
async def _dump_container_val(self, writer, elem, container_type, params=None):
    """Dump a single container element.

    :param writer: destination writer
    :param elem: element to serialize
    :param container_type: container type specification
    :param params: optional type parameters; the tail is forwarded on
    :return:
    """
    elem_type = container_elem_type(container_type, params)
    sub_params = params[1:] if params else None
    await self.dump_field(writer, elem, elem_type, sub_params)
def value_to_int(attrib, key):
    """Return the value for ``key`` in ``attrib``, massaging blanks to 0.

    A missing key yields 0; an empty or whitespace-only string yields 0;
    otherwise the stored value is returned unchanged.

    :param attrib: mapping to read from
    :param key: key to look up
    :return: the massaged value
    """
    val = attrib.get(key, 0)
    if isinstance(val, str):
        # Blank strings mean "no runs" in the source feed.
        if val.isspace() or val == '':
            return 0
        return val
    # Bug fix: non-string values (including the default 0) previously fell
    # through without a return and yielded None.
    return val
def int_to_string(number, alphabet, padding=None):
    """Convert a number to a string, using the given alphabet.

    The output has the most significant digit first.  When ``padding`` is
    given, the result is left-padded with the alphabet's first symbol up to
    that length.
    """
    base = len(alphabet)
    digits = []
    # Collect digits least-significant first; reverse at the end.
    while number:
        number, rem = divmod(number, base)
        digits.append(alphabet[rem])
    if padding:
        pad_len = max(padding - len(digits), 0)
        digits.extend(alphabet[0] * pad_len)
    return ''.join(reversed(digits))
def export(self, sql_client, merge_rule='skip', coerce=False):
    '''A method to export all the records in table to another table.

    :param sql_client: class object with sql client methods
    :param merge_rule: string with name of rule to adopt for pre-existing records
    :param coerce: boolean to enable migration even if table schemas don't match
    :return: string with exit message

    NOTE: available merge rules include: overwrite, skip and upsert
    NOTE(review): despite the docstring, the function actually returns the
    integer record count computed below -- confirm which is intended.
    '''
    title = '%s.export' % self.__class__.__name__
    # validate sql client
    method_list = ['list', 'create', 'read', 'update', 'delete', 'remove', 'export', 'exists', '_construct_inserts', '_parse_columns', '_compare_columns', 'table', 'session', 'table_name', 'database_name']
    for method in method_list:
        if getattr(sql_client, method, None) == None:
            from labpack.parsing.grammar import join_words
            raise ValueError('%s(sql_client=...) must be a client object with %s methods.' % (title, join_words(method_list)))
    # verbosity
    export_name = self.table_name
    import_name = sql_client.table_name
    print('Migrating %s table in %s database to %s table in %s database' % (export_name, self.database_name, import_name, sql_client.database_name), end='', flush=True)
    # determine differences between tables
    export_columns = self._parse_columns()
    import_columns = sql_client._parse_columns()
    add_columns, remove_columns, rename_columns, retype_columns, resize_columns = self._compare_columns(import_columns, export_columns)
    # Destructive schema differences abort the migration unless coerced.
    if remove_columns or retype_columns or resize_columns:
        if not coerce:
            raise ValueError("Migration from %s to %s prevented because schemas don't match and data could be lost." % (export_name, import_name))

    # define upsert reconstructor
    def _reconstruct_upsert(update_kwargs):
        # Rebuild a nested record dict from flattened 'a.b.c' insert keys,
        # unpickling any bytes values back into python objects.
        record_details = {}
        current_details = record_details
        for key, value in update_kwargs.items():
            record_key = key
            record_value = value
            record_segments = record_key.split('.')
            for i in range(len(record_segments)):
                segment = record_segments[i]
                if i + 1 < len(record_segments):
                    # NOTE(review): membership is tested against the root
                    # dict (record_details) but assignment targets the cursor
                    # (current_details) -- looks like it should test
                    # current_details; confirm.
                    if segment not in record_details.keys():
                        current_details[segment] = {}
                    current_details = current_details[segment]
                else:
                    if isinstance(record_value, bytes):
                        current_details[segment] = pickle.loads(record_value)
                    else:
                        current_details[segment] = record_value
            # Reset the cursor to the root for the next flattened key.
            current_details = record_details
        return record_details

    # migrate records from old to new
    list_statement = self.table.select()
    count = 0
    added = 0
    skipped = 0
    upserted = 0
    overwritten = 0
    for record in self.session.execute(list_statement).fetchall():
        record_details = self._reconstruct_record(record)
        primary_key = record_details['id']
        if not sql_client.exists(primary_key):
            # New record: insert directly.
            create_kwargs = self._construct_inserts(record, import_columns, rename_columns, retype_columns, resize_columns)
            insert_statement = sql_client.table.insert().values(**create_kwargs)
            sql_client.session.execute(insert_statement)
            added += 1
        elif merge_rule == 'overwrite':
            # Pre-existing record: delete then re-insert.
            sql_client.delete(primary_key)
            create_kwargs = self._construct_inserts(record, import_columns, rename_columns, retype_columns, resize_columns)
            insert_statement = sql_client.table.insert().values(**create_kwargs)
            sql_client.session.execute(insert_statement)
            overwritten += 1
        elif merge_rule == 'skip':
            skipped += 1
        elif merge_rule == 'upsert':
            update_kwargs = self._construct_inserts(record, import_columns, rename_columns, retype_columns, resize_columns)
            update_details = _reconstruct_upsert(update_kwargs)
            sql_client.update(update_details)
            upserted += 1
        # Print a progress dot every 10 processed records.
        count = added + overwritten + skipped + upserted
        if not count % 10:
            print('.', end='', flush=True)
    # handle verbosity
    print(' done.')
    # report outcome
    plural = ''
    skip_insert = ''
    overwrite_insert = ''
    upsert_insert = ''
    if added != 1:
        plural = 's'
    if skipped > 0:
        skip_plural = ''
        if skipped > 1:
            skip_plural = 's'
        skip_insert = ' %s record%s skipped to avoid overwrite.' % (str(skipped), skip_plural)
    if overwritten > 0:
        overwrite_plural = ''
        if overwritten > 1:
            overwrite_plural = 's'
        overwrite_insert = ' %s record%s overwritten.' % (str(overwritten), overwrite_plural)
    if upserted > 0:
        upsert_plural = ''
        if upserted > 1:
            upsert_plural = 's'
        upsert_insert = ' %s record%s upserted.' % (str(upserted), upsert_plural)
    exit_msg = '%s record%s added to %s.%s%s%s' % (str(added), plural, import_name, skip_insert, overwrite_insert, upsert_insert)
    print(exit_msg)
    return count
def update_bookmark(self, bookmark_id, favorite=None, archive=None, read_percent=None):
    """Update the given bookmark, which must belong to the current user.

    :param bookmark_id: ID of the bookmark to update.
    :param favorite (optional): Whether this article is favorited or not.
    :param archive (optional): Whether this article is archived or not.
    :param read_percent (optional): The read progress made in this article,
        where 1.0 means the bottom and 0.0 means the very top.
    """
    rdb_url = self._generate_url('bookmarks/{0}'.format(bookmark_id))
    params = {}
    # Boolean flags are sent as 1/0; only included when explicitly set.
    for param_name, flag in (('favorite', favorite), ('archive', archive)):
        if flag is not None:
            params[param_name] = 1 if flag == True else 0  # noqa: E712 (preserve exact-True semantics)
    if read_percent is not None:
        try:
            params['read_percent'] = float(read_percent)
        except ValueError:
            # Silently drop an unparsable progress value.
            pass
    return self.post(rdb_url, params)
def parse_criteria(criteria_string):
    """Parse a powerful and simple string criteria into a proper mongo
    syntax criteria.

    Args:
        criteria_string (str): A string representing a search criteria.
            Also supports wild cards. E.g.,
            something like "*2O" gets converted to
            {'pretty_formula': {'$in': [u'B2O', u'Xe2O', u'Li2O', ...]}}

            Other syntax examples:
                mp-1234: Interpreted as a Materials ID.
                Fe2O3 or *2O3: Interpreted as reduced formulas.
                Li-Fe-O or *-Fe-O: Interpreted as chemical systems.

            You can mix and match with spaces, which are interpreted as
            "OR". E.g., "mp-1234 FeO" means query for all compounds with
            reduced formula FeO or with materials_id mp-1234.

    Returns:
        A mongo query dict.
    """
    toks = criteria_string.split()

    def parse_sym(sym):
        # "*" expands to every known element; "{A,B,...}" to the listed
        # symbols; anything else stays as-is.
        if sym == "*":
            return [el.symbol for el in Element]
        else:
            m = re.match(r"\{(.*)\}", sym)
            if m:
                return [s.strip() for s in m.group(1).split(",")]
            else:
                return [sym]

    def parse_tok(t):
        # Materials ID token, e.g. "mp-1234".
        if re.match(r"\w+-\d+", t):
            return {"task_id": t}
        elif "-" in t:
            # Chemical system, possibly with wildcards: expand into every
            # sorted element combination without repeated elements.
            elements = [parse_sym(sym) for sym in t.split("-")]
            chemsyss = []
            for cs in itertools.product(*elements):
                if len(set(cs)) == len(cs):
                    # Check for valid symbols
                    cs = [Element(s).symbol for s in cs]
                    chemsyss.append("-".join(sorted(cs)))
            return {"chemsys": {"$in": chemsyss}}
        else:
            # Formula, possibly with wildcards: expand into every matching
            # reduced formula.
            all_formulas = set()
            explicit_els = []
            wild_card_els = []
            for sym in re.findall(r"(\*[\.\d]*|\{.*\}[\.\d]*|[A-Z][a-z]*)[\.\d]*", t):
                if ("*" in sym) or ("{" in sym):
                    wild_card_els.append(sym)
                else:
                    m = re.match(r"([A-Z][a-z]*)[\.\d]*", sym)
                    explicit_els.append(m.group(1))
            # Expected number of distinct elements in a candidate formula.
            nelements = len(wild_card_els) + len(set(explicit_els))
            parts = re.split(r"(\*|\{.*\})", t)
            parts = [parse_sym(s) for s in parts if s != ""]
            for f in itertools.product(*parts):
                c = Composition("".join(f))
                if len(c) == nelements:
                    # Check for valid Elements in keys.
                    for e in c.keys():
                        Element(e.symbol)
                    all_formulas.add(c.reduced_formula)
            return {"pretty_formula": {"$in": list(all_formulas)}}

    # Multiple space-separated tokens are OR-ed together.
    if len(toks) == 1:
        return parse_tok(toks[0])
    else:
        return {"$or": list(map(parse_tok, toks))}
def get_gain(self, attr_name):
    """Calculate the information gain from splitting on the given attribute.

    Gain is the overall entropy minus the probability-weighted entropy of
    each branch created by the split.

    :param attr_name: name of the attribute to evaluate
    :return: the information gain as a float
    """
    subset_entropy = 0.0
    # Iterate the dict directly instead of the Python-2-era iterkeys()
    # helper -- semantics are identical on a mapping.
    for value in self._attr_value_counts[attr_name]:
        value_prob = self.get_value_prob(attr_name, value)
        subset_entropy += value_prob * self.get_entropy(attr_name, value)
    return self.main_entropy - subset_entropy
def console_get_char_background(con: tcod.console.Console, x: int, y: int) -> Color:
    """Return the background color at the x,y of this console.

    .. deprecated:: 8.4
        Array access performs significantly faster than using this function.
        See :any:`Console.bg`.
    """
    cdata = lib.TCOD_console_get_char_background(_console(con), x, y)
    return Color._new_from_cdata(cdata)
def _platform_patterns ( self , platform = 'generic' , compiled = False ) :
"""Return all the patterns for specific platform .""" | patterns = self . _dict_compiled . get ( platform , None ) if compiled else self . _dict_text . get ( platform , None )
if patterns is None :
raise KeyError ( "Unknown platform: {}" . format ( platform ) )
return patterns |
def enrich(self, columns):
    """Expand list-valued cells of *columns* into extra rows.

    This method appends at the end of the dataframe as many rows as items
    are found in the lists of elements in the provided columns.  It assumes
    that the length of the lists for the several specified columns is the
    same.  As an example, for the row
    ``{"C1": "V1", "C2": [1, 2], "C3": ["a", "b"]}``
    the rows ``{"C1": "V1", "C2": 1, "C3": "a"}`` and
    ``{"C1": "V1", "C2": 2, "C3": "b"}`` are appended.

    The original (list-valued) rows are kept in the dataframe; expanded rows
    are appended after them and the index is reset.

    :param columns: list of column names (strings) to expand
    :returns: the enriched ``pandas.DataFrame`` (also stored in ``self.data``)
    """
    # If any requested column is missing, return the dataframe untouched.
    for column in columns:
        if column not in self.data.columns:
            return self.data
    expanded_frames = []
    for row_pos, cell in enumerate(self.data[columns[0]]):
        # Only non-empty lists produce expanded rows.
        if len(cell) >= 1:
            # Build one row per element of the list in this cell.
            exploded = pandas.DataFrame()
            for column in columns:
                exploded[column] = self.data.loc[row_pos, column]
            # Repeat the original row once per element, then overwrite the
            # list-valued columns with the individual elements.
            extra_df = pandas.DataFrame([self.data.loc[row_pos]] * len(exploded))
            for column in columns:
                extra_df[column] = list(exploded[column])
            expanded_frames.append(extra_df)
    # BUG FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in
    # 2.0; pandas.concat() is the supported equivalent and preserves the
    # original behavior (appended rows, index reset via ignore_index).
    self.data = pandas.concat([self.data] + expanded_frames, ignore_index=True)
    return self.data
def load(self, tableName='rasters', rasters=[]):
    '''Accepts a list of raster descriptors to load into the database.

    Each entry in *rasters* is a dict with at least a ``path`` key and
    optional ``srid``, ``no-data`` and ``timestamp`` keys.

    NOTE(review): the original docstring promised "the ids of the rasters
    loaded successfully" as a return value, but nothing is returned below —
    confirm callers do not rely on it.  Also note ``tableName`` is never used
    in this body, and the mutable default ``rasters=[]`` is shared between
    calls (harmless as long as it is never mutated here).
    '''
    # Create table if necessary
    Base.metadata.create_all(self._engine)
    # Create a session
    Session = sessionmaker(bind=self._engine)
    session = Session()
    for raster in rasters:
        # Must read in using the raster2pgsql commandline tool.
        rasterPath = raster['path']
        # Default to SRID 4326 (WGS84) when none is supplied.
        if 'srid' in raster:
            srid = str(raster['srid'])
        else:
            srid = '4326'
        # Default no-data value is -1.
        if 'no-data' in raster:
            noData = str(raster['no-data'])
        else:
            noData = '-1'
        wellKnownBinary = RasterLoader.rasterToWKB(rasterPath, srid, noData, self._raster2pgsql)
        rasterBinary = wellKnownBinary
        # Get the filename
        filename = os.path.split(rasterPath)[1]
        # Populate raster record
        mapKitRaster = MapKitRaster()
        mapKitRaster.filename = filename
        mapKitRaster.raster = rasterBinary
        if 'timestamp' in raster:
            mapKitRaster.timestamp = raster['timestamp']
        # Add to session and commit each raster individually.
        session.add(mapKitRaster)
        session.commit()
def _create_worker ( self , worker ) :
"""Common worker setup .""" | worker . sig_started . connect ( self . _start )
self . _workers . append ( worker ) |
def _get ( pseudodict , key , single = True ) :
"""Helper method for getting values from " multi - dict " s""" | matches = [ item [ 1 ] for item in pseudodict if item [ 0 ] == key ]
if single :
return matches [ 0 ]
else :
return matches |
def export_ruptures_csv(ekey, dstore):
    """Export event-based ruptures as a tab-separated CSV file.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: a list containing the path of the written file, or an empty
        list for scenario calculations
    """
    oq = dstore['oqparam']
    if 'scenario' in oq.calculation_mode:
        return []
    dest = dstore.export_path('ruptures.csv')
    header = ('rupid multiplicity mag centroid_lon centroid_lat '
              'centroid_depth trt strike dip rake boundary').split()
    rows = []
    for rgetter in gen_rupture_getters(dstore):
        rup_data = calc.RuptureData(rgetter.trt, rgetter.rlzs_by_gsim)
        for rec in rup_data.to_array(rgetter.get_ruptures()):
            rows.append((rec['rup_id'], rec['multiplicity'], rec['mag'],
                         rec['lon'], rec['lat'], rec['depth'], rgetter.trt,
                         rec['strike'], rec['dip'], rec['rake'],
                         rec['boundary']))
    # Sort by rupture serial (rup_id is the first tuple element).
    rows.sort()
    comment = 'investigation_time=%s, ses_per_logic_tree_path=%s' % (
        oq.investigation_time, oq.ses_per_logic_tree_path)
    writers.write_csv(dest, rows, header=header, sep='\t', comment=comment)
    return [dest]
def simulate(s0, transmat, steps=1):
    """Simulate the next state(s) of a Markov chain.

    Parameters
    ----------
    s0 : ndarray
        Vector with state variables at t=0.
    transmat : ndarray
        The estimated transition/stochastic matrix.
    steps : int
        (Default: 1) The number of steps to simulate model outputs ahead.
        If steps > 1 a multi-step simulation is triggered.

    Returns
    -------
    out : ndarray
        (steps = 1) Vector with the simulated state variables.
        (steps > 1) Array with one row per step; row 0 holds the initial
        state vector ``s0`` for algorithmic reasons, so the shape is
        ``(steps + 1, len(s0))``.
    """
    # Single-step: just one vector-matrix product.
    if steps == 1:
        return np.dot(s0, transmat)
    # Multi-step: iterate the product, keeping every intermediate state.
    out = np.zeros(shape=(steps + 1, len(s0)), order='C')
    out[0, :] = s0
    for step in range(1, steps + 1):
        out[step, :] = np.dot(out[step - 1, :], transmat)
    return out
def get_deposit(self, deposit_id, **params):
    """Fetch a single deposit for this account.

    https://developers.coinbase.com/api/v2#show-a-deposit
    """
    client = self.api_client
    return client.get_deposit(self.id, deposit_id, **params)
def trimquality(self):
    """Uses bbduk from the bbmap tool suite to quality and adapter trim.

    For every sample with a list of fastq files, runs bbduk_trim into the
    sample's output directory (skipping samples whose trimmed output already
    exists), logs the command/stdout/stderr, and records the resulting
    ``*trimmed.fastq.gz`` files in the sample metadata.
    """
    logging.info("Trimming fastq files")
    # Iterate through strains with fastq files
    with progressbar(self.metadata) as bar:
        for sample in bar:
            # As the metadata can be populated with 'NA' (string) if there are
            # no fastq files, only process if :fastqfiles is a list
            if type(sample.general.fastqfiles) is list:
                # Check to see if the fastq files exist
                fastqfiles = sorted(sample.general.fastqfiles)
                # Define the output directory
                outputdir = sample.general.outputdirectory
                # Define the name of the trimmed fastq files
                cleanforward = os.path.join(outputdir, '{}_R1_trimmed.fastq.gz'.format(sample.name))
                cleanreverse = os.path.join(outputdir, '{}_R2_trimmed.fastq.gz'.format(sample.name))
                # Incorporate read length into the minlength parameter - set it
                # to 50 unless one or more of the reads has a lower calculated
                # length than 50
                try:
                    lesser_length = min(int(sample.run.forwardlength), int(sample.run.reverselength))
                except ValueError:
                    # reverselength may be non-numeric (e.g. single-end runs)
                    lesser_length = int(sample.run.forwardlength)
                min_len = 50 if lesser_length >= 50 else lesser_length
                # Number of bases to automatically trim from the beginning of
                # each read, as these bases tend to have lower quality scores.
                trim_left = 0
                # If, for some reason, only the reverse reads are present, use
                # the appropriate output file name
                try:
                    if 'R2' in fastqfiles[0]:
                        # Reverse-only input: write to the R2 output name.
                        if not os.path.isfile(cleanreverse):
                            out, err, bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
                                                                     reverse_in=None,
                                                                     forward_out=cleanreverse,
                                                                     trimq=10,
                                                                     minlength=min_len,
                                                                     forcetrimleft=trim_left,
                                                                     returncmd=True)
                        else:
                            # Output already exists: nothing to log.
                            bbdukcall = str()
                            out = str()
                            err = str()
                    else:
                        if not os.path.isfile(cleanforward):
                            out, err, bbdukcall = bbtools.bbduk_trim(forward_in=fastqfiles[0],
                                                                     forward_out=cleanforward,
                                                                     trimq=10,
                                                                     minlength=min_len,
                                                                     forcetrimleft=trim_left,
                                                                     returncmd=True)
                        else:
                            bbdukcall = str()
                            out = str()
                            err = str()
                except (IndexError, CalledProcessError):
                    # No fastq files at all, or bbduk itself failed: log blanks.
                    bbdukcall = str()
                    out = str()
                    err = str()
                # Write the command, stdout, and stderr to the logfile
                write_to_logfile(bbdukcall, bbdukcall, self.logfile, sample.general.logout, sample.general.logerr, None, None)
                write_to_logfile(out, err, self.logfile, sample.general.logout, sample.general.logerr, None, None)
                # Add the trimmed fastq files to a list
                trimmedfastqfiles = sorted(glob(os.path.join(sample.general.outputdirectory, '*trimmed.fastq.gz')))
                # Populate the metadata if the files exist
                sample.general.trimmedfastqfiles = trimmedfastqfiles if trimmedfastqfiles else list()
    # All the trimmed files have been added to the metadata
    logging.info('Fastq files trimmed')
def _genotype_in_background ( rec , base_name , back_samples ) :
"""Check if the genotype in the record of interest is present in the background records .""" | def passes ( rec ) :
return not rec . FILTER or len ( rec . FILTER ) == 0
return ( passes ( rec ) and any ( rec . genotype ( base_name ) . gt_alleles == rec . genotype ( back_name ) . gt_alleles for back_name in back_samples ) ) |
def is_capture(self, move: Move) -> bool:
    """Checks if the given pseudo-legal move is a capture.

    A move captures when its target square holds an enemy piece, or when it
    is an en passant capture (whose target square is empty).
    """
    target_mask = BB_SQUARES[move.to_square]
    enemy_occupied = self.occupied_co[not self.turn]
    if target_mask & enemy_occupied:
        return True
    return self.is_en_passant(move)
def expand_dims(self, axis):
    """Insert a new axis, at a given position in the array shape.

    Args:
        axis (int): Position (amongst axes) where the new axis is to be
            inserted.  ``-1`` means "append after the last axis".

    Returns:
        A Timeseries with an extra axis, except when the new axis is
        prepended (axis 0), in which case the time axis is displaced and a
        plain ndarray is returned.
    """
    if axis == -1:
        axis = self.ndim
    expanded = np.expand_dims(self, axis)
    if axis == 0:
        # Prepended an axis: the result is no longer a Timeseries.
        return expanded
    new_labels = self.labels.insert(axis, None)
    return Timeseries(expanded, self.tspan, new_labels)
def add_xmlid(cr, module, xmlid, model, res_id, noupdate=False):
    """Adds an entry in ir_model_data. Typically called in the pre script.

    One usage example is when an entry has been added in the XML and there is
    a high probability that the user has already created the entry manually.
    For example, a currency was added in the XML data of the base module
    in OpenERP 6 but the user had already created this missing currency
    by hand in its 5.0 database. In order to avoid having 2 identical
    currencies (which is in fact blocked by an sql_constraint), you have to
    add the entry in ir_model_data before the upgrade.

    :returns: False when the XMLID already existed, True when it was created
    """
    # Skip creation when the XMLID already exists.
    cr.execute(
        "SELECT id FROM ir_model_data WHERE module=%s AND name=%s "
        "AND model=%s",
        (module, xmlid, model))
    if cr.fetchone():
        return False
    logged_query(
        cr,
        "INSERT INTO ir_model_data (create_uid, create_date, "
        "write_uid, write_date, date_init, date_update, noupdate, "
        "name, module, model, res_id) "
        "VALUES (%s, (now() at time zone 'UTC'), %s, "
        "(now() at time zone 'UTC'), (now() at time zone 'UTC'), "
        "(now() at time zone 'UTC'), %s, %s, %s, %s, %s)",
        (SUPERUSER_ID, SUPERUSER_ID, noupdate, xmlid, module, model, res_id))
    return True
def update_hash(self, layers: Iterable):
    """Compute this layer's ``hash_id``.

    The hash is determined by the layer's own properties (type and size) and
    by the ``hash_id`` of every input layer; input layers must therefore be
    hashed first.  Input layers themselves are never hashed.
    """
    if self.graph_type == LayerType.input.value:
        return
    digest = hashlib.md5()
    digest.update(LayerType(self.graph_type).name.encode('ascii'))
    digest.update(str(self.size).encode('ascii'))
    for input_idx in self.input:
        input_hash = layers[input_idx].hash_id
        if input_hash is None:
            raise ValueError('Hash id of layer {}: {} not generated!'.format(input_idx, layers[input_idx]))
        digest.update(input_hash.encode('ascii'))
    self.hash_id = digest.hexdigest()
def wrapModel(self, model):
    """Converts application-provided model objects to L{IResource} providers.

    First tries to adapt *model* directly to ``IResource``.  Failing that,
    adapts it to ``INavigableFragment``, resolves a document factory from the
    fragment's theme name when one is declared, and wraps the fragment for
    navigation (using the Athena wrapper for live fragments/elements).

    Raises ``CouldNotLoadFromThemes`` when no document factory can be found
    for the fragment.
    """
    # Direct adaptation; None (the default) signals "no adapter".
    res = IResource(model, None)
    if res is None:
        frag = INavigableFragment(model)
        # Prefer a docFactory looked up from the fragment's declared theme name.
        fragmentName = getattr(frag, 'fragmentName', None)
        if fragmentName is not None:
            fragDocFactory = self._getDocFactory(fragmentName)
            if fragDocFactory is not None:
                frag.docFactory = fragDocFactory
        if frag.docFactory is None:
            raise CouldNotLoadFromThemes(frag, self._preferredThemes())
        # Live fragments/elements need the Athena-aware wrapper.
        useAthena = isinstance(frag, (athena.LiveFragment, athena.LiveElement))
        return self._wrapNavFrag(frag, useAthena)
    else:
        return res
def update_task_redundancy(config, task_id, redundancy):
    """Update task redudancy for a project.

    When *task_id* is None every task is affected, so the user is asked to
    confirm first.
    """
    if task_id is None:
        msg = ("Are you sure you want to update all the tasks redundancy?")
        if not click.confirm(msg):
            click.echo("Aborting.")
            return
    res = _update_tasks_redundancy(config, task_id, redundancy)
    click.echo(res)
def context(self, outdir, log_prefix):
    """Point this instance at a run directory for the duration of a block.

    :param outdir: run directory
    :param log_prefix: log filenames prefix
    """
    try:
        self._log_prefix = log_prefix
        self._outdir = outdir
        yield
    finally:
        # Always restore the neutral state, even if the body raised.
        self._outdir = None
        self._log_prefix = None
def dependency_status(data):
    """Return abstracted status of dependencies.

    - ``STATUS_ERROR`` .. one dependency has error status or was deleted
    - ``STATUS_DONE`` .. all dependencies have done status
    - ``None`` .. other
    """
    parent_statuses = set(
        DataDependency.objects.filter(child=data, kind=DataDependency.KIND_IO)
        .distinct('parent__status')
        .values_list('parent__status', flat=True))
    # No IO dependencies at all counts as done.
    if not parent_statuses:
        return Data.STATUS_DONE
    # A deleted parent shows up as a None status.
    if None in parent_statuses:
        return Data.STATUS_ERROR
    if Data.STATUS_ERROR in parent_statuses:
        return Data.STATUS_ERROR
    if parent_statuses == {Data.STATUS_DONE}:
        return Data.STATUS_DONE
    return None
def write(self, image, options, thumbnail):
    """Writes the thumbnail image by shelling out to the convert tool.

    Builds the command line from the image options, renders into a temporary
    file, and copies the result into *thumbnail*.  Raises ``EngineError``
    when the external command exits non-zero.
    """
    if options['format'] == 'JPEG' and options.get(
            'progressive', settings.THUMBNAIL_PROGRESSIVE):
        image['options']['interlace'] = 'line'
    image['options']['quality'] = options['quality']
    args = settings.THUMBNAIL_CONVERT.split(' ')
    # "[0]" selects the first frame/page of multi-frame sources.
    args.append(image['source'] + '[0]')
    for k in image['options']:
        v = image['options'][k]
        args.append('-%s' % k)
        if v is not None:
            args.append('%s' % v)
    flatten = "on"
    if 'flatten' in options:
        flatten = options['flatten']
    if settings.THUMBNAIL_FLATTEN and not flatten == "off":
        args.append('-flatten')
    suffix = '.%s' % EXTENSIONS[options['format']]
    with NamedTemporaryFile(suffix=suffix, mode='rb') as fp:
        args.append(fp.name)
        # BUG FIX: materialize the list instead of a ``map`` iterator, so it
        # can be passed to Popen and still rendered with %r in the error
        # message below (a consumed map object is useless under Python 3).
        args = [smart_str(arg) for arg in args]
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # BUG FIX: read both pipes via communicate() BEFORE checking the exit
        # status.  The original called p.wait() first, which can deadlock
        # when the child fills the stdout/stderr pipe buffers.
        out, err = p.communicate()
        if p.returncode:
            raise EngineError("The command %r exited with a non-zero exit code and printed this to stderr: %s" % (args, err))
        elif err:
            logger.error("Captured stderr: %s", err)
        thumbnail.write(fp.read())
def _load_sequences_to_reference_gene(self, g_id, force_rerun=False):
    """Load orthologous strain sequences to the reference Protein object and
    save it as a new pickle.

    :param g_id: reference gene identifier
    :param force_rerun: when True, rebuild the pickle even if it exists
    :returns: tuple of (g_id, path to the protein-with-sequences pickle)
    """
    protein_seqs_pickle_path = op.join(self.sequences_by_gene_dir, '{}_protein_withseqs.pckl'.format(g_id))
    # Only do the work when the output pickle is missing or a rerun is forced.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=protein_seqs_pickle_path):
        protein_pickle_path = self.gene_protein_pickles[g_id]
        protein_pickle = ssbio.io.load_pickle(protein_pickle_path)
        for strain, info in self.strain_infodict.items():
            # Lazy FASTA index over the strain genome; avoids loading it whole.
            strain_sequences = SeqIO.index(info['genome_path'], 'fasta')
            strain_gene_functional = info['functional_genes'][g_id]
            if strain_gene_functional:
                # Pull the gene ID of the strain from the orthology matrix
                strain_gene_key = self.df_orthology_matrix.at[g_id, strain]
                new_id = '{}_{}'.format(g_id, strain)
                # Skip strains whose sequence was already loaded previously.
                if protein_pickle.sequences.has_id(new_id):
                    continue
                protein_pickle.load_manual_sequence(seq=strain_sequences[strain_gene_key], ident=new_id, set_as_representative=False)
        protein_pickle.save_pickle(outfile=protein_seqs_pickle_path)
    return g_id, protein_seqs_pickle_path
async def destroy_unit(self, *unit_names):
    """Destroy units by name.

    :param unit_names: one or more unit names to destroy
    :returns: the facade's DestroyUnits result
    """
    connection = self.connection()
    app_facade = client.ApplicationFacade.from_connection(connection)
    # BUG FIX: pluralize "unit" only when MORE than one unit is named; the
    # original condition was inverted (added the "s" for exactly one unit).
    log.debug(
        'Destroying unit%s %s',
        's' if len(unit_names) > 1 else '',
        ' '.join(unit_names))
    return await app_facade.DestroyUnits(list(unit_names))
def execute(function, name):
    """Execute a task, returning a TaskResult.

    On success the result wraps the value returned by ``function()``; on
    failure it wraps the raised exception instead.
    """
    try:
        outcome = function()
    except Exception as error:
        return TaskResult(name, False, error, None)
    return TaskResult(name, True, None, outcome)
def init_config(self, app):
    """Initialize configuration.

    Copies every ``JSONSCHEMAS_``-prefixed default from the ``config``
    module into the app config (without overriding explicit settings) and
    warns when the schema host looks unconfigured.
    """
    for key in dir(config):
        if key.startswith('JSONSCHEMAS_'):
            app.config.setdefault(key, getattr(config, key))
    host_setting = app.config['JSONSCHEMAS_HOST']
    if not host_setting or host_setting == 'localhost':
        app.logger.warning('JSONSCHEMAS_HOST is set to {0}'.format(host_setting))
def find_modules(self, requested_names):
    """find the module(s) given the name(s)

    A requested name containing a space targets a specific module instance
    ("name instance"); otherwise only the first word of each module's name
    is compared.
    """
    found_modules = set()
    for requested_name in requested_names:
        is_instance = " " in requested_name
        for module_name, module in self.py3_wrapper.output_modules.items():
            if module["type"] == "py3status":
                name = module["module"].module_nice_name
            else:
                name = module["module"].module_name
            if is_instance:
                matched = requested_name == name
            else:
                matched = requested_name == name.split(" ")[0]
            if matched:
                found_modules.add(module_name)
    if self.debug:
        self.py3_wrapper.log("found %s" % found_modules)
    return found_modules
def rapidfire(self, max_nlaunch=-1, max_loops=1, sleep_time=5):
    """Keeps submitting `Tasks` until we are out of jobs or no job is ready to run.

    Args:
        max_nlaunch: Maximum number of launches. default: no limit.
        max_loops: Maximum number of loops
        sleep_time: seconds to sleep between rapidfire loop iterations

    Returns:
        The number of tasks launched.
    """
    num_launched, do_exit, launched = 0, False, []
    for count in range(max_loops):
        if do_exit:
            break
        # Sleep between iterations, but not before the very first one.
        if count > 0:
            time.sleep(sleep_time)
        tasks = self.fetch_tasks_to_run()
        # I don't know why but we receive duplicated tasks.
        if any(task in launched for task in tasks):
            logger.critical("numtasks %d already in launched list:\n%s" % (len(tasks), launched))
        # Preventive test: drop anything we already launched.
        tasks = [t for t in tasks if t not in launched]
        if not tasks:
            continue
        for task in tasks:
            fired = task.start()
            if fired:
                launched.append(task)
                num_launched += 1
            # Chained comparison: cap reached AND a positive cap was set
            # (max_nlaunch <= 0 means "no limit").
            if num_launched >= max_nlaunch > 0:
                logger.info('num_launched >= max_nlaunch, going back to sleep')
                do_exit = True
                break
    # Update the database.
    self.flow.pickle_dump()
    return num_launched
def get_objectives_by_query(self, objective_query):
    """Gets a list of ``Objectives`` matching the given objective query.

    arg:    objective_query (osid.learning.ObjectiveQuery): the
            objective query
    return: (osid.learning.ObjectiveList) - the returned
            ``ObjectiveList``
    raise:  NullArgument - ``objective_query`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``objective_query`` is not of this service
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceQuerySession.get_resources_by_query
    and_list = list()
    or_list = list()
    # Translate each query term into a MongoDB clause; terms that carry both
    # $in and $nin are matched when EITHER clause is satisfied.
    for term in objective_query._query_terms:
        if '$in' in objective_query._query_terms[term] and '$nin' in objective_query._query_terms[term]:
            and_list.append(
                {'$or': [{term: {'$in': objective_query._query_terms[term]['$in']}},
                         {term: {'$nin': objective_query._query_terms[term]['$nin']}}]})
        else:
            and_list.append({term: objective_query._query_terms[term]})
    # Keyword terms are OR-ed together, then AND-ed with the rest.
    for term in objective_query._keyword_terms:
        or_list.append({term: objective_query._keyword_terms[term]})
    if or_list:
        and_list.append({'$or': or_list})
    # Restrict results to the session's current view, if any.
    view_filter = self._view_filter()
    if view_filter:
        and_list.append(view_filter)
    if and_list:
        query_terms = {'$and': and_list}
        collection = JSONClientValidated('learning', collection='Objective', runtime=self._runtime)
        result = collection.find(query_terms).sort('_id', DESCENDING)
    else:
        # No constraints at all: return an empty list rather than everything.
        result = []
    return objects.ObjectiveList(result, runtime=self._runtime, proxy=self._proxy)
def fatal(msg, exitcode=1, **kwargs):
    """Prints a message then exits the program. Optionally pause before exit
    with ``pause=True`` kwarg.

    Remaining keyword arguments are forwarded to ``echo``.
    """
    # NOTE: can't use a normal parameter named ``pause`` since the helper
    # function of the same name is called below.  ``pop`` with a default
    # replaces the old "in kwargs.keys()" dance.
    pause_before_exit = kwargs.pop("pause", False)
    echo("[FATAL] " + msg, **kwargs)
    if pause_before_exit:
        pause()
    sys.exit(exitcode)
def gen_shell(opts, **kwargs):
    '''Return the correct shell interface for the target system'''
    if not kwargs['winrm']:
        return Shell(opts, **kwargs)
    # Windows targets need the optional saltwinshell library.
    try:
        import saltwinshell
        return saltwinshell.Shell(opts, **kwargs)
    except ImportError:
        log.error('The saltwinshell library is not available')
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)
def reorder_translation_formset_by_language_code(inline_admin_form):
    """Shuffle the forms in the formset of a multilingual model into the
    order of their language_ids.
    """
    form_by_language = {
        form.form.initial['language_id']: form for form in inline_admin_form}
    return [form_by_language[code] for code in get_language_code_list()]
def parse_localnamespacepath(self, tup_tree):
    """Parse a LOCALNAMESPACEPATH element and return the namespace it
    represents as a unicode string.

    The namespace is formed by joining the namespace components (one from
    each NAMESPACE child element) with a slash (e.g. to "root/cimv2").

    ::

        <!ELEMENT LOCALNAMESPACEPATH (NAMESPACE+)>
    """
    self.check_node(tup_tree, 'LOCALNAMESPACEPATH', (), (), ('NAMESPACE',))
    if not kids(tup_tree):
        raise CIMXMLParseError(
            _format("Element {0!A} missing child elements (expecting one "
                    "or more child elements 'NAMESPACE')", name(tup_tree)),
            conn_id=self.conn_id)
    # list_of_various() behaves like list_of_same() for a single allowed
    # child element, but runs a little faster.
    namespace_parts = self.list_of_various(tup_tree, ('NAMESPACE',))
    return u'/'.join(namespace_parts)
def get_identity(self, subject_id, entities=None):
    """Get all the identity information that has been received and is still
    valid about the subject.

    :param subject_id: The identifier of the subject
    :param entities: The identifiers of the entities whose assertions are
        interesting. If the list is empty all entities are interesting.
    :return: A 2-tuple consisting of the identity information (a dictionary
        of attributes and values) and the list of entities whose information
        has timed out.
    """
    if not entities:
        entities = self.entities(subject_id)
        if not entities:
            return {}, []
    combined = {}
    timed_out = []
    cached_items = self._cache.get_multi(entities, subject_id + '_')
    for entity_id, item in cached_items.items():
        try:
            info = self.get_info(item)
        except ToOld:
            # Stale assertion: remember the entity, skip its attributes.
            timed_out.append(entity_id)
            continue
        for key, vals in info["ava"].items():
            if key in combined:
                # Merge without duplicates.
                combined[key] = list(set(combined[key]).union(set(vals)))
            else:
                combined[key] = vals
    return combined, timed_out
def rpc_request(self, frame_out, connection_adapter=None):
    """Perform a RPC Request.

    :param specification.Frame frame_out: Amqp frame.
    :rtype: dict
    """
    with self.rpc.lock:
        # Register before writing so the response cannot race the request.
        request_uuid = self.rpc.register_request(frame_out.valid_responses)
        self._connection.write_frame(self.channel_id, frame_out)
        return self.rpc.get_request(request_uuid, connection_adapter=connection_adapter)
def setup(self):
    """performs data collection for qpid broker

    Builds a common option string (SSL cert/key and broker port), then
    collects the output of the qpid management tools and copies the broker
    configuration and log files.
    """
    options = ""
    amqps_prefix = ""
    # set amqps:// when SSL is used
    if self.get_option("ssl"):
        amqps_prefix = "amqps://"
    # for either present option, add --option=value to 'options' variable
    for option in ["ssl-certificate", "ssl-key"]:
        if self.get_option(option):
            amqps_prefix = "amqps://"
            options = (options + " --%s=" % (option) + self.get_option(option))
    # Point the tools at the configured broker port, if any.
    if self.get_option("port"):
        options = (options + " -b " + amqps_prefix + "localhost:%s" % (self.get_option("port")))
    self.add_cmd_output([
        "qpid-stat -g" + options,  # applies since 0.18 version
        "qpid-stat -b" + options,  # applies to pre-0.18 versions
        "qpid-stat -c" + options,
        "qpid-stat -e" + options,
        "qpid-stat -q" + options,
        "qpid-stat -u" + options,
        "qpid-stat -m" + options,  # applies since 0.18 version
        "qpid-config exchanges" + options,
        "qpid-config queues" + options,
        "qpid-config exchanges -b" + options,  # applies to pre-0.18 vers.
        "qpid-config queues -b" + options,  # applies to pre-0.18 versions
        "qpid-config exchanges -r" + options,  # applies since 0.18 version
        "qpid-config queues -r" + options,  # applies since 0.18 version
        "qpid-route link list" + options,
        "qpid-route route list" + options,
        "qpid-cluster" + options,  # applies to pre-0.22 versions
        "qpid-ha query" + options,  # applies since 0.22 version
        "ls -lanR /var/lib/qpidd"])
    self.add_copy_spec([
        "/etc/qpidd.conf",  # applies to pre-0.22 versions
        "/etc/qpid/qpidd.conf",  # applies since 0.22 version
        "/var/lib/qpid/syslog",
        "/etc/ais/openais.conf",
        "/var/log/cumin.log",
        "/var/log/mint.log",
        "/etc/sasl2/qpidd.conf",
        "/etc/qpid/qpidc.conf",
        "/etc/sesame/sesame.conf",
        "/etc/cumin/cumin.conf",
        "/etc/corosync/corosync.conf",
        "/var/lib/sesame",
        "/var/log/qpidd.log",
        "/var/log/sesame",
        "/var/log/cumin"])
def _fetch(self, default_path):
    """Internal method for fetching.

    This differs from :meth:`.fetch` in that it accepts a default path as
    an argument.
    """
    path = self._path or default_path
    # POST only when there are post parameters to send.
    req_type = 'POST' if len(self._post_params) != 0 else 'GET'
    url = '/'.join(['http:/', self.spacegdn.endpoint, path])
    resp = requests.request(req_type, url, params=self._get_params,
                            data=self._post_params, headers=self._headers)
    response = Response()
    data = resp.json() if resp.ok else None
    response.add(data, resp.status_code, resp.reason)
    return response
def parse_dict(self, attrs):
    """Read a dict into (identifier, classes, key/value) attributes.

    Returns the "id" entry (default empty string), the "classes" entry
    (default empty list) and an ordered dict of every remaining key/value.
    """
    attrs = attrs or {}
    ident = attrs.get("id", "")
    classes = attrs.get("classes", [])
    other = OrderedDict(
        (key, value) for key, value in attrs.items()
        if key not in ("classes", "id"))
    return ident, classes, other
def clear(self):
    """Command: 0x03 — clear all leds.

    Data layout: [Command]
    """
    payload = bytearray([LightProtocolCommand.Clear])
    return self.send(payload)
def get_entry(self, key, column=None, table=None):
    """Get a specific entry.

    Returns the first row of *table* (default ``self.main_table``) where
    *column* (default ``"id"``) equals *key*, or None when no row matches.
    """
    if table is None:
        table = self.main_table
    if column is None:
        column = "id"
    # Table and column names cannot be bound as SQL parameters, so they are
    # interpolated; they must come from trusted code, never from user input.
    # SECURITY FIX: the key value is now bound as a DB-API parameter instead
    # of being string-interpolated with hand-rolled quote escaping, which
    # closes the SQL-injection vector (and removes the py2-only
    # ``basestring`` escaping dance).
    query = 'SELECT * from "%s" where "%s"==? LIMIT 1;' % (table, column)
    self.own_cursor.execute(query, (key,))
    return self.own_cursor.fetchone()
def download_url(url, destination=None, progress_bar=True):
    """Download a URL to a local file.

    Parameters
    ----------
    url : str
        The URL to download.
    destination : str, None
        The destination of the file. If None is given the file is saved to a
        temporary directory.
    progress_bar : bool
        Whether to show a command-line progress bar while downloading.

    Returns
    -------
    filename : str
        The location of the downloaded file.

    Notes
    -----
    Progress bar use/example adapted from tqdm documentation:
    https://github.com/tqdm/tqdm
    """
    def my_hook(t):
        # Wrap a tqdm instance into a reporthook suitable for urlretrieve.
        last_b = [0]

        def inner(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            if b > 0:
                # Advance by the bytes downloaded since the last call.
                t.update((b - last_b[0]) * bsize)
            last_b[0] = b
        return inner

    if progress_bar:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
    else:
        filename, _ = urlretrieve(url, filename=destination)
    # BUG FIX: the docstring promises the downloaded file's location, but the
    # original body never returned it (callers always got None).
    return filename
def create(self, name, *args, **kwargs):
    """Need to wrap the default call to handle exceptions.

    A 403 from the API means the image is public and cannot be shared; that
    is surfaced as ``UnsharableImage``.  Everything else is re-raised as-is.
    """
    try:
        return super(ImageMemberManager, self).create(name, *args, **kwargs)
    except Exception as e:
        # BUG FIX: not every exception carries an ``http_status`` attribute;
        # accessing it unconditionally would mask the original error with an
        # AttributeError.  Use getattr with a safe default instead.
        if getattr(e, "http_status", None) == 403:
            raise exc.UnsharableImage("You cannot share a public image.")
        else:
            raise
def datetime_from_iso8601(date):
    """Small helper that parses ISO-8601 dates.

    >>> datetime_from_iso8601("2013-04-10T12:52:39")
    datetime.datetime(2013, 4, 10, 12, 52, 39)
    >>> datetime_from_iso8601("2013-01-07T12:55:19.257")
    datetime.datetime(2013, 1, 7, 12, 55, 19, 257000)
    """
    # FIX: renamed the local from ``format`` (shadowed the builtin).
    fmt = ISO8610_FORMAT
    if date.endswith("Z"):
        # Trailing "Z" marks UTC; the strptime formats don't include it.
        date = date[:-1]
    # FIX: raw string — "\d" in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python).
    if re.match(r".*\.\d+", date):
        # Date includes fractional seconds.
        fmt = ISO8610_FORMAT_MICROSECONDS
    return datetime.datetime.strptime(date, fmt)
def _manageColumns(self, action, varBind, **context):
    """Apply a management action on all columns

    Parameters
    ----------
    action: :py:class:`str` any of :py:class:`MibInstrumController`'s states
        to apply on all columns but the one passed in `varBind`
    varBind: :py:class:`~pysnmp.smi.rfc1902.ObjectType` object representing
        new :py:class:`RowStatus` Managed Object Instance value being set
        on table row

    Other Parameters
    ----------------
    \\*\\*context:
        Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked once
          all columns have been processed or an error occurs

    Notes
    -----
    The callback functions (e.g. `cbFun`) expect two parameters: `varBind`
    and `**context`.

    In case of an error, the `error` key in the `context` dict will contain
    an exception object.

    Assumes that row consistency check has been triggered by RowStatus
    columnar object transition into `active` state.
    """
    name, val = varBind
    # Short-circuit logging idiom: only formats when the INS flag is active.
    (debug.logger & debug.FLAG_INS and debug.logger('%s: _manageColumns(%s, %s, %r)' % (self, action, name, val)))
    cbFun = context['cbFun']
    # One OID sub-identifier past the table entry OID marks the column index.
    colLen = len(self.name) + 1
    # Build a map of index names and values for automatic initialization
    indexVals = {}
    instId = name[colLen:]
    indices = []
    for impliedFlag, modName, symName in self._indexNames:
        # NOTE(review): ``mibBuilder`` is not defined in this method — it is
        # presumably a module-level object; confirm in the enclosing module.
        mibObj, = mibBuilder.importSymbols(modName, symName)
        syntax, instId = self.oidToValue(mibObj.syntax, instId, impliedFlag, indices)
        indexVals[mibObj.name] = syntax
        indices.append(syntax)
    # Mutable cell so the nested callback can decrement the shared counter.
    count = [len(self._vars)]
    if name[:colLen] in self._vars:
        # The status column itself is skipped below, so don't count it.
        count[0] -= 1
    def _cbFun(varBind, **context):
        count[0] -= 1
        # Fire the user callback only after the last column reports back.
        if not count[0]:
            cbFun(varBind, **context)
    for colName, colObj in self._vars.items():
        acFun = context.get('acFun')
        if colName in indexVals:
            colInstanceValue = indexVals[colName]
            # Index column is usually read-only
            acFun = None
        elif name[:colLen] == colName:
            # status column is following `write` path
            continue
        else:
            colInstanceValue = None
        actionFun = getattr(colObj, action)
        colInstanceName = colName + name[colLen:]
        actionFun((colInstanceName, colInstanceValue), **dict(context, acFun=acFun, cbFun=_cbFun))
    debug.logger & debug.FLAG_INS and debug.logger('_manageColumns: action %s name %s instance %s %svalue %r' % (action, name, instId, name in indexVals and "index " or "", indexVals.get(name, val)))
def get_logs(self, project, release_id, **kwargs):
    """GetLogs.

    [Preview API] Get logs for a release Id.

    :param str project: Project ID or project name
    :param int release_id: Id of the release.
    :rtype: object
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    if release_id is not None:
        route['releaseId'] = self._serialize.url('release_id', release_id, 'int')
    response = self._send(
        http_method='GET',
        location_id='c37fbab5-214b-48e4-a55b-cb6b4f6e4038',
        version='5.0-preview.2',
        route_values=route,
        accept_media_type='application/zip',
    )
    # Stream the zip back to the caller, forwarding an optional progress callback.
    return self._client.stream_download(response, callback=kwargs.get("callback"))
def build(ctx, skip):
    """Build documentation as HTML.

    This command performs these steps:

    1. Removes any existing symlinks in the ``modules``, ``packages``, and
       ``_static`` directories.
    2. Finds packages set up by EUPS that have Sphinx-enabled doc/ directories
       and links their module and package directories into the
       ``pipelines_lsst_io`` repository.  The ``doc/manifest.yaml`` file in
       each package defines the package and module documentation directories.
    3. Runs a single, monolithic Sphinx build on the ``pipelines_lsst_io``
       repository and linked packages.

    By default the built site lands in ``_build/html`` of the
    ``pipelines_lsst_io`` repository.  See ``documenteer.stackdocs.build``
    for the underlying APIs.
    """
    status = build_stack_docs(ctx.obj['root_project_dir'], skippedNames=skip)
    # A non-zero status from the Sphinx build becomes the process exit code.
    if status > 0:
        sys.exit(status)
def _EnsureRequesterStarted(self):
    """Start the analyzer on first use; subsequent calls are no-ops."""
    if self._analyzer_started:
        return
    self._analyzer.start()
    self._analyzer_started = True
def _set_group_selection(self):
    """Create group based selection.

    Used when selection is not passed directly but instead via a grouper.

    NOTE: this should be paired with a call to _reset_group_selection
    """
    grp = self.grouper
    # Only applies when group keys are kept (as_index=True), there are real
    # groupings, the object is 2-D, and no selection has been set yet.
    if not (self.as_index and
            getattr(grp, 'groupings', None) is not None and
            self.obj.ndim > 1 and
            self._group_selection is None):
        return
    ax = self.obj._info_axis
    # In-axis, non-level groupers are the columns being grouped on; the
    # implicit selection becomes every *other* column.
    groupers = [g.name for g in grp.groupings
                if g.level is None and g.in_axis]
    if len(groupers):
        # GH12839 clear selected obj cache when group selection changes
        self._group_selection = ax.difference(Index(groupers), sort=False).tolist()
        self._reset_cache('_selected_obj')
def p_ex_map_pair(self, p):
    """ex_map_pair : ex_map_elem COLON ex_map_elem"""
    # NOTE: the docstring above is the ply grammar rule -- do not edit it.
    key, value = p[1], p[3]
    try:
        p[0] = {key: value}
    except TypeError:
        # Unhashable key: record a parse error and yield an empty map.
        msg = u"%s is an invalid hash key because it cannot be hashed." % repr(key)
        self.errors.append((msg, p.lineno(2), self.path))
        p[0] = {}
def get(self, key):
    """Get a value for a given key.

    :param key: entry's key
    :return: corresponding value, or None when the key is unknown
    """
    # Data fields take precedence over sub reports.
    for mapping in (self._data_fields, self._sub_reports):
        if key in mapping:
            return mapping[key]
    return None
def __getDummyDateList():
    """Generate a dummy date list for testing without hitting the server.

    :return: list of 'A<year><day-of-year>' strings (e.g. 'A2001001')
        covering years 2001-2009 and days 1-364.
    """
    # ``range`` works on both Python 2 and 3; the original ``xrange`` is
    # Python-2-only and raises NameError on Python 3.
    dates = []
    for year in range(2001, 2010):
        for day in range(1, 365):
            dates.append('A%04d%03d' % (year, day))
    return dates
def logNormalRDD(sc, mean, std, size, numPartitions=None, seed=None):
    """Generates an RDD comprised of i.i.d. samples from the log normal
    distribution with the input mean and standard distribution.

    :param sc: SparkContext used to create the RDD.
    :param mean: mean for the log Normal distribution
    :param std: std for the log Normal distribution
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ log N(mean, std).
    """
    # Coerce the distribution parameters to float before crossing into the JVM.
    mean, std = float(mean), float(std)
    return callMLlibFunc("logNormalRDD", sc._jsc, mean, std, size, numPartitions, seed)
def _default_to_pandas(self, op, *args, **kwargs):
    """Helper method to use default pandas function.

    Converts this (Modin) object to a real pandas object, applies ``op``
    (a callable, or the name of a pandas method), and wraps the result back
    into the matching Modin type where possible.
    """
    empty_self_str = "" if not self.empty else " for empty DataFrame"
    # Warn/log that we are falling back to pandas for this operation.
    ErrorMessage.default_to_pandas(
        "`{}.{}`{}".format(
            self.__name__,
            op if isinstance(op, str) else op.__name__,
            empty_self_str,
        )
    )
    if callable(op):
        result = op(self._to_pandas(), *args, **kwargs)
    elif isinstance(op, str):
        # The inner `getattr` is ensuring that we are treating this object (whether
        # it is a DataFrame, Series, etc.) as a pandas object. The outer `getattr`
        # will get the operation (`op`) from the pandas version of the class and run
        # it on the object after we have converted it to pandas.
        result = getattr(getattr(pandas, self.__name__), op)(self._to_pandas(), *args, **kwargs)
    # SparseDataFrames cannot be serialize by arrow and cause problems for Modin.
    # For now we will use pandas.
    if isinstance(result, type(self)) and not isinstance(result, (pandas.SparseDataFrame, pandas.SparseSeries)):
        # Same Modin type back: update in place if the caller asked for it.
        return self._create_or_update_from_compiler(result, inplace=kwargs.get("inplace", False))
    elif isinstance(result, pandas.DataFrame):
        from .dataframe import DataFrame
        return DataFrame(result)
    elif isinstance(result, pandas.Series):
        from .series import Series
        return Series(result)
    else:
        try:
            if (isinstance(result, (list, tuple)) and len(result) == 2 and isinstance(result[0], pandas.DataFrame)):
                # Some operations split the DataFrame into two (e.g. align). We need to wrap
                # both of the returned results
                if isinstance(result[1], pandas.DataFrame):
                    second = self.__constructor__(result[1])
                else:
                    second = result[1]
                return self.__constructor__(result[0]), second
            else:
                return result
        except TypeError:
            # len()/indexing may fail on exotic results; return them untouched.
            return result
def install_webpi(name, install_args=None, override_args=False):
    '''Instructs Chocolatey to install a package via the Microsoft Web PI service.

    name
        The name of the package to be installed. Only accepts a single argument.

    install_args
        A list of install arguments you want to pass to the installation
        process, i.e. product key or feature list.

    override_args
        Set to True to override the original install arguments (for the
        native installer) in the package and use your own. When False,
        install_args are appended to the end of the default arguments.

    CLI Example:

    .. code-block:: bash

        salt '*' chocolatey.install_webpi <package name>
        salt '*' chocolatey.install_webpi <package name> install_args=<args> override_args=True
    '''
    # Delegate to the generic installer with the Web PI source selected.
    return install(
        name,
        source='webpi',
        install_args=install_args,
        override_args=override_args,
    )
def incrementing_sleep(self, previous_attempt_number, delay_since_first_attempt_ms):
    """Sleep an incremental amount of time after each attempt, starting at
    wait_incrementing_start and incrementing by wait_incrementing_increment.

    The wait never goes below zero, even with a negative increment.
    """
    steps = previous_attempt_number - 1
    wait = self._wait_incrementing_start + self._wait_incrementing_increment * steps
    return max(wait, 0)
def remove_stacktrace_locals(client, event):
    """Removes local variables from any frames.

    :param client: an ElasticAPM client
    :param event: a transaction or error event
    :return: The modified event
    """
    def strip_vars(frame):
        # Return the popped value to mirror the original lambda's result.
        return frame.pop("vars", None)
    return _process_stack_frames(event, strip_vars)
def ReadHuntLogEntries(self, hunt_id, offset, count, with_substring=None):
    """Reads hunt log entries of a given hunt using given query options."""
    entries = []
    # Collect the full log of every flow belonging to the hunt, re-tagging
    # each entry with the hunt/client/flow identifiers.
    for flow_obj in self._GetHuntFlows(hunt_id):
        flow_entries = self.ReadFlowLogEntries(
            flow_obj.client_id, flow_obj.flow_id, 0, sys.maxsize,
            with_substring=with_substring)
        for entry in flow_entries:
            entries.append(
                rdf_flow_objects.FlowLogEntry(
                    hunt_id=hunt_id,
                    client_id=flow_obj.client_id,
                    flow_id=flow_obj.flow_id,
                    timestamp=entry.timestamp,
                    message=entry.message))
    # Global chronological order, then apply the requested page.
    entries.sort(key=lambda e: e.timestamp)
    return entries[offset:offset + count]
def smart_unicode_decode(encoded_string):
    """Given an encoded string of unknown format, detect the format with
    chardet and return the unicode version.

    (The original motivating input, bug #11, was a UTF-16-BE byte string
    carrying a BOM.)
    """
    if not encoded_string:
        return u''
    # Fast path: pure ASCII needs no detection.
    try:
        return encoded_string.decode('ascii')
    except UnicodeDecodeError:
        pass
    guess = chardet.detect(encoded_string)
    # bug 54 -- depending on chardet version, a failed guess yields either
    # None or {'encoding': None}; fall back to utf8 in both cases.
    encoding = (guess or {}).get('encoding') or 'utf8'
    decoded = six.text_type(encoded_string, encoding=encoding, errors='replace')
    # A useless BOM character may survive decoding; drop it.
    if decoded and decoded[0] in bom_headers:
        decoded = decoded[1:]
    return decoded
def set_pending_boot_mode(self, boot_mode):
    """Sets the boot mode of the system for next boot.

    :param boot_mode: either 'uefi' or 'legacy'.
    :raises: IloInvalidInputError, on an invalid input.
    :raises: IloError, on an error from iLO.
    """
    sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    # Validate against the known boot modes before touching BIOS settings.
    if boot_mode.upper() not in BOOT_MODE_MAP_REV.keys():
        msg = (('Invalid Boot mode: "%(boot_mode)s" specified, valid boot '
                'modes are either "uefi" or "legacy"') % {'boot_mode': boot_mode})
        raise exception.IloInvalidInputError(msg)
    try:
        # Map 'uefi'/'legacy' to the Redfish value and stage it as a
        # pending BIOS setting (takes effect on next boot).
        sushy_system.bios_settings.pending_settings.set_pending_boot_mode(
            BOOT_MODE_MAP_REV.get(boot_mode.upper()))
    except sushy.exceptions.SushyError as e:
        # Wrap transport/controller failures in the iLO exception type
        # callers expect.
        msg = (self._('The Redfish controller failed to set '
                      'pending boot mode to %(boot_mode)s. '
                      'Error: %(error)s') % {'boot_mode': boot_mode, 'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
def _make_carpet(self, rescale_data):
    """Construct the carpet from the input image.

    :param rescale_data: when True, rows are rescaled after unrolling.
    """
    self.carpet = self._unroll_array(self.input_image, self.fixed_dim)
    if not rescale_data:
        return
    self.carpet = row_wise_rescale(self.carpet)
def save(self, model, joining=None, touch=True):
    """Save a new model and attach it to the parent model.

    :type model: eloquent.Model
    :type joining: dict
    :type touch: bool

    :rtype: eloquent.Model
    """
    # Save the child without touching; the attach below handles touching.
    model.save({'touch': False})
    self.attach(model.get_key(), joining if joining is not None else {}, touch)
    return model
def update_builds(self, builds, project):
    """UpdateBuilds.

    Updates multiple builds.

    :param [Build] builds: The builds to update.
    :param str project: Project ID or project name
    :rtype: [Build]
    """
    route = {}
    if project is not None:
        route['project'] = self._serialize.url('project', project, 'str')
    body = self._serialize.body(builds, '[Build]')
    response = self._send(
        http_method='PATCH',
        location_id='0cd358e1-9217-4d94-8269-1c1ee6f93dcf',
        version='5.0',
        route_values=route,
        content=body,
    )
    return self._deserialize('[Build]', self._unwrap_collection(response))
def save(self, status=None, callback_pos=None, id_workflow=None):
    """Save object to persistent storage.

    :param status: new status to store on the model, if given.
    :param callback_pos: new callback position to store, if given.
    :param id_workflow: uuid of the Workflow row to associate, if given.
    :raises WorkflowsMissingModel: when no model is attached to this object.
    """
    if self.model is None:
        raise WorkflowsMissingModel()
    with db.session.begin_nested():
        # Let subscribers react before any state is written.
        workflow_object_before_save.send(self)
        self.model.modified = datetime.now()
        if status is not None:
            self.model.status = status
        if id_workflow is not None:
            workflow = Workflow.query.filter_by(uuid=id_workflow).one()
            self.model.workflow = workflow
        # Special handling of JSON fields to mark update
        # NOTE(review): when the stored callback_pos is None it is reset to
        # an empty list and the ``callback_pos`` argument is ignored --
        # confirm this precedence is intended.
        if self.model.callback_pos is None:
            self.model.callback_pos = list()
        elif callback_pos is not None:
            self.model.callback_pos = callback_pos
        # flag_modified forces SQLAlchemy to persist in-place changes to
        # JSON columns, which it cannot detect on its own.
        flag_modified(self.model, 'callback_pos')
        if self.model.data is None:
            self.model.data = dict()
        flag_modified(self.model, 'data')
        if self.model.extra_data is None:
            self.model.extra_data = dict()
        flag_modified(self.model, 'extra_data')
        db.session.merge(self.model)
    if self.id is not None:
        self.log.debug("Saved object: {id} at {callback_pos}".format(id=self.model.id or "new", callback_pos=self.model.callback_pos))
    workflow_object_after_save.send(self)
def set_output_format(output_format):
    """Sets output format; returns standard bits of table. These are:

    ttx:  how to start a title for a set of tables
    xtt:  how to end a title for a set of tables
    tx:   how to start a table
    xt:   how to close a table
    capx: how to start a caption for the table
    xcap: how to close a caption for the table
    rx:   how to start a row and the first cell in the row
    xr:   how to close a row and the last cell in the row
    rspx: how to start a cell with a row span argument
    xrsp: how to close the row span argument
    cx:   how to open a cell
    xc:   how to close a cell

    Returned as the tuple
    (ttx, xtt, tx, xt, capx, xcap, rx, xr, cx, xc, rspx, xrsp, hlx, hxl, xhl),
    where hlx/hxl/xhl delimit hyperlinks.
    """
    # One tuple per format, in the exact return order documented above.
    formats = {
        'wiki': ('== ', ' ==', '', '', "'''", "'''",
                 '|', '|', '|', '|', '|<|', '>', '[', ' ', ']'),
        'html': ('<b>', '</b><hr>', '<table border = "1">', '</table><br><br>',
                 '<caption>', '</caption>', '<tr>', '</tr>', '<td>', '</td>',
                 '<td rowspan=', '>', '<a href="', '">', '</a>'),
    }
    if output_format not in formats:
        raise ValueError("unrecognized output_format %s" % output_format)
    return formats[output_format]
def grab(self, monitor):
    # type: (Monitor) -> ScreenShot
    """See :meth:`MSSMixin.grab <mss.base.MSSMixin.grab>` for full details."""
    # pylint: disable=too-many-locals
    # Convert PIL bbox style
    if isinstance(monitor, tuple):
        # (left, top, right, bottom) -> dict with explicit width/height
        monitor = {"left": monitor[0], "top": monitor[1], "width": monitor[2] - monitor[0], "height": monitor[3] - monitor[1], }
    core = self.core
    rect = CGRect((monitor["left"], monitor["top"]), (monitor["width"], monitor["height"]))
    # NOTE(review): the 1, 0, 0 arguments are CoreGraphics window-list /
    # window-id / image-option constants -- confirm against the Apple docs.
    image_ref = core.CGWindowListCreateImage(rect, 1, 0, 0)
    if not image_ref:
        raise ScreenShotError("CoreGraphics.CGWindowListCreateImage() failed.")
    width = int(core.CGImageGetWidth(image_ref))
    height = int(core.CGImageGetHeight(image_ref))
    prov = copy_data = None
    try:
        # Copy the raw pixel bytes out of the CGImage's data provider.
        prov = core.CGImageGetDataProvider(image_ref)
        copy_data = core.CGDataProviderCopyData(prov)
        data_ref = core.CFDataGetBytePtr(copy_data)
        buf_len = core.CFDataGetLength(copy_data)
        raw = ctypes.cast(data_ref, ctypes.POINTER(ctypes.c_ubyte * buf_len))
        data = bytearray(raw.contents)
        # Remove padding per row
        bytes_per_row = int(core.CGImageGetBytesPerRow(image_ref))
        bytes_per_pixel = int(core.CGImageGetBitsPerPixel(image_ref))
        bytes_per_pixel = (bytes_per_pixel + 7) // 8
        if bytes_per_pixel * width != bytes_per_row:
            # Rows are padded past width*bpp; rebuild a tightly-packed buffer.
            cropped = bytearray()
            for row in range(height):
                start = row * bytes_per_row
                end = start + width * bytes_per_pixel
                cropped.extend(data[start:end])
            data = cropped
    finally:
        # Always release the CoreGraphics objects we obtained above.
        if prov:
            core.CGDataProviderRelease(prov)
        if copy_data:
            core.CFRelease(copy_data)
    return self.cls_image(data, monitor, size=Size(width, height))
def nt_event_log_handler(name, logname, appname, dllname=None, logtype="Application"):
    """A Bark logging handler logging output to the NT Event Log.
    Similar to logging.handlers.NTEventLogHandler.

    :param name: handler name (unused here)
    :param logname: log name (unused here)
    :param appname: application name shown in the event log
    :param dllname: path of the DLL holding message definitions (optional)
    :param logtype: which event log to write to (default "Application")
    """
    # NOTE(review): ``name`` and ``logname`` are never referenced below --
    # presumably kept so all handler factories share one signature; confirm
    # before removing.
    return wrap_log_handler(logging.handlers.NTEventLogHandler(appname, dllname=dllname, logtype=logtype))
def hot_threads(self, node_id=None, params=None):
    """An API allowing to get the current hot threads on each node in the cluster.
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-hot-threads.html>`_

    :arg node_id: A comma-separated list of node IDs or names to limit the
        returned information; use `_local` to return information from the
        node you're connecting to, leave empty to get information from all
        nodes
    :arg type: The type to sample (default: cpu), valid choices are:
        'cpu', 'wait', 'block'
    :arg ignore_idle_threads: Don't show threads that are in known-idle
        places, such as waiting on a socket select or pulling from an empty
        task queue (default: true)
    :arg interval: The interval for the second sampling of threads
    :arg snapshots: Number of samples of thread stacktrace (default: 10)
    :arg threads: Specify the number of threads to provide information for
        (default: 3)
    :arg timeout: Explicit operation timeout
    """
    # The public kwarg is "type_" to avoid shadowing the Python builtin;
    # translate it back to "type" for the HTTP API.
    if params and "type_" in params:
        params["type"] = params.pop("type_")
    path = _make_path("_cluster", "nodes", node_id, "hotthreads")
    return self.transport.perform_request("GET", path, params=params)
def find_needed_formatter(input_format, output_format):
    """Find a data formatter given an input and output format.

    input_format - needed input format. see utils.input.dataformats
    output_format - needed output format. see utils.input.dataformats
    """
    # Only consider registry entries classified as formatters.
    formatter_classes = [entry.cls for entry in registry
                         if entry.category == RegistryCategories.formatters]
    matches = []
    for formatter_cls in formatter_classes:
        # Instantiate each candidate so it can report its supported formats.
        inst = formatter_cls()
        if input_format in inst.input_formats and output_format in inst.output_formats:
            matches.append(formatter_cls)
    if matches:
        return matches[0]
    return None
def push_source(self, newstream, newfile=None):
    "Push an input source onto the lexer's input source stack."
    # NOTE(review): ``basestring`` is Python-2-only; on Python 3 this would
    # need ``str`` -- confirm which interpreter versions are supported.
    if isinstance(newstream, basestring):
        # A bare string becomes a file-like object so the lexer can read it.
        newstream = StringIO(newstream)
    # Remember the current source so a later pop can restore it.
    self.filestack.appendleft((self.infile, self.instream, self.lineno))
    self.infile = newfile
    self.instream = newstream
    self.lineno = 1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.