signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def toSolarDate(self):
    """Convert this lunar date to the corresponding solar (Gregorian) date.

    >>> LunarDate(1900, 1, 1).toSolarDate()
    datetime.date(1900, 1, 31)
    >>> LunarDate(2008, 9, 4).toSolarDate()
    datetime.date(2008, 10, 2)
    >>> LunarDate(1976, 8, 8, 1).toSolarDate()
    datetime.date(1976, 10, 1)
    >>> LunarDate(2004, 1, 30).toSolarDate()
    Traceback (most recent call last):
    ValueError: day out of range
    >>> LunarDate(2004, 13, 1).toSolarDate()
    Traceback (most recent call last):
    ValueError: month out of range
    >>> LunarDate(2100, 1, 1).toSolarDate()
    Traceback (most recent call last):
    ValueError: year out of range [1900, 2100)
    """
    def _calcDays(yearInfo, month, day, isLeapMonth):
        # Days elapsed since the lunar new year up to (month, day).
        isLeapMonth = int(isLeapMonth)
        res = 0
        for _month, _days, _isLeapMonth in self._enumMonth(yearInfo):
            if (_month, _isLeapMonth) == (month, isLeapMonth):
                if 1 <= day <= _days:
                    return res + day - 1
                raise ValueError("day out of range")
            res += _days
        raise ValueError("month out of range")

    start_year = 1900
    end_year = start_year + len(yearInfos)
    # BUG FIX: the original tested `start_year < 1900`, which is always
    # false, so years below the supported table were never rejected.
    # The intended check is on self.year.
    if self.year < start_year or self.year >= end_year:
        raise ValueError('year out of range [{}, {})'.format(start_year, end_year))
    yearIdx = self.year - start_year
    # Whole-year day counts before the target year, then the offset
    # within the target year.
    offset = sum(yearDays[i] for i in range(yearIdx))
    offset += _calcDays(yearInfos[yearIdx], self.month, self.day, self.isLeapMonth)
    return self._startDate + datetime.timedelta(days=offset)
|
def set_lacp_timeout(self, name, value=None):
    """Configure the Port-Channel LACP fallback timeout.

    The fallback timeout is how long an interface in fallback mode
    remains in LACP mode without receiving a PDU.

    Args:
        name (str): The Port-Channel interface name
        value (int): port-channel lacp fallback timeout in seconds

    Returns:
        True if the operation succeeds otherwise False is returned
    """
    timeout_cmd = self.command_builder('port-channel lacp fallback timeout', value=value)
    return self.configure(['interface %s' % name, timeout_cmd])
|
def update_bounds(self, bounds):
    '''Update cylinders start and end positions'''
    self.bounds = bounds
    start_points = bounds[:, 0, :]
    end_points = bounds[:, 1, :]
    # Euclidean length of each cylinder axis.
    delta = end_points - start_points
    self.lengths = np.sqrt((delta ** 2).sum(axis=1))
    vertices, normals, colors = self._process_reference()
    self.tr.update_vertices(vertices)
    self.tr.update_normals(normals)
|
def modify ( self , request , nodes , namespace , root_id , post_cut , breadcrumb ) :
"""Modify nodes of a menu"""
|
if breadcrumb :
return nodes
for node in nodes :
if node . attr . get ( 'hidden' ) :
node . visible = False
return nodes
|
def reduce(self, target_map, target_reduce, threads=0):
    """map/reduce this query among a bunch of processes

    :param target_map: callable, this function will be called once for each
        row this query pulls out of the db; if you want something about the
        row to be seen by the target_reduce function, return that value from
        this function and it will be queued for target_reduce to process
    :param target_reduce: callable, this function will be called for any non
        None value that the target_map function returns
    :param threads: integer, if not passed in this will be pegged to how many
        cpus python detects, which is almost always what you want
    """
    if not threads:
        threads = multiprocessing.cpu_count()
    # we subtract one for the main process
    map_threads = threads - 1 if threads > 1 else 1
    q = self.copy()
    limit = q.bounds.limit
    offset = q.bounds.offset
    # honor an explicit limit; otherwise count the full result set
    total_count = limit if limit else q.count()
    # rows each worker is responsible for (the last page may run short)
    limit_count = int(math.ceil(float(total_count) / float(map_threads)))
    logger.info("{} processes will handle {} rows each for a total of {}".format(map_threads, limit_count, total_count))
    queue = multiprocessing.JoinableQueue()
    # close all open db global connections just in case, because we can't be sure
    # what the target_map methods are going to do, we want them to re-open connections
    # that they need
    interfaces = get_interfaces()
    for name, inter in interfaces.items():
        inter.close()
    # just in case we also close the query connection since it can in theory
    # be non-global
    q.interface.close()
    ts = []
    for page in range(map_threads):
        # each worker gets its own copy of the query, windowed to its page
        q = self.copy()
        q.limit(limit_count).offset(offset + (limit_count * page))
        t = ReduceThread(target=target_map, query=q, queue=queue,)
        t.start()
        ts.append(t)
    # drain the queue in the main process until every worker has finished
    # and the queue is empty
    while ts or not queue.empty():
        try:
            val = queue.get(True, 1.0)
            target_reduce(val)
        except queues.Empty:
            pass
        else:
            queue.task_done()
        # faster than using any((t.is_alive() for t in mts))
        ts = [t for t in ts if t.is_alive()]
|
def list_metrics(self, project, page_size=0, page_token=None):
    """List metrics for the project associated with this client.

    :type project: str
    :param project: ID of the project whose metrics are to be listed.

    :type page_size: int
    :param page_size: maximum number of metrics to return. If not passed,
        defaults to a value set by the API.

    :type page_token: str
    :param page_token: opaque marker for the next "page" of metrics. If not
        passed, the API will return the first page of metrics.

    :rtype: :class:`~google.api_core.page_iterator.Iterator`
    :returns: Iterator of :class:`~google.cloud.logging.metric.Metric`
        accessible to the current API.
    """
    parent = "projects/%s" % (project,)
    iterator = self._gapic_api.list_log_metrics(parent, page_size=page_size)
    # Attach the handwritten client and paging/conversion hooks.
    iterator.client = self._client
    iterator.next_page_token = page_token
    iterator.item_to_value = _item_to_metric
    return iterator
|
def update_metadata(self, key: str, msg: MaildirMessage) -> None:
    """Atomically rename the message file so its name reflects
    :meth:`~mailbox.MaildirMessage.get_info`, using :func:`os.rename`."""
    subpath = self._lookup(key)
    subdir, name = os.path.split(subpath)
    new_subdir = msg.get_subdir()
    new_name = key + self.colon + msg.get_info()
    if subdir != new_subdir:
        # Moving between new/cur is not supported here.
        raise ValueError('Message subdir may not be updated')
    if name == new_name:
        return
    new_subpath = os.path.join(new_subdir, new_name)
    old_path = os.path.join(self._path, subpath)
    new_path = os.path.join(self._path, new_subpath)
    os.rename(old_path, new_path)
    self._toc[key] = new_subpath
|
def destroy(self):
    """Cleanup the activity lifecycle listener."""
    widget = self.widget
    if widget:
        # Stop listening before tearing the view down.
        self.set_active(False)
    super(AndroidBarcodeView, self).destroy()
|
def line_type(self, line_type):
    """Sets the line_type of this ChartSettings.

    Plot interpolation type. linear is default  # noqa: E501

    :param line_type: The line_type of this ChartSettings.  # noqa: E501
    :type: str
    """
    allowed_values = ["linear", "step-before", "step-after", "basis", "cardinal", "monotone"]  # noqa: E501
    if line_type in allowed_values:
        self._line_type = line_type
    else:
        raise ValueError(
            "Invalid value for `line_type` ({0}), must be one of {1}"  # noqa: E501
            .format(line_type, allowed_values))
|
def as_constraint(cls, constraint, model, constraint_type=None, **init_kwargs):
    """Initiate a Model which should serve as a constraint. Such a
    constraint-model should be initiated with knowledge of another
    ``BaseModel``, from which it will take its parameters::

        model = Model({y: a * x + b})
        constraint = Model.as_constraint(Eq(a, 1), model)

    ``constraint.params`` will be ``[a, b]`` instead of ``[a]``.

    :param constraint: An ``Expr``, a mapping or iterable of ``Expr``, or a
        ``Relational``.
    :param model: An instance of (a subclass of)
        :class:`~symfit.core.fit.BaseModel`.
    :param constraint_type: When ``constraint`` is not a
        :class:`~sympy.core.relational.Relational`, a
        :class:`~sympy.core.relational.Relational` has to be provided
        explicitly.
    :param kwargs: Any additional keyword arguments which will be passed on
        to the init method.
    """
    allowed_types = [sympy.Eq, sympy.Ge, sympy.Le]
    if isinstance(constraint, Relational):
        # Infer the type from the relational and rewrite it as lhs - rhs,
        # i.e. an expression compared against zero.
        constraint_type = constraint.__class__
        constraint = constraint.lhs - constraint.rhs
    # Initiate the constraint model, in such a way that we take care
    # of any dependencies
    instance = cls.with_dependencies(constraint, dependency_model=model, **init_kwargs)
    # Check if the constraint_type is allowed, and flip the sign if needed
    if constraint_type not in allowed_types:
        raise ModelError('Only constraints of the type {} are allowed. A constraint' ' of type {} was provided.'.format(allowed_types, constraint_type))
    elif constraint_type is sympy.Le:  # We change this to a Ge and flip the sign
        instance = -instance
        constraint_type = sympy.Ge
    instance.constraint_type = constraint_type
    # Constraints must evaluate to a single scalar expression.
    if len(instance.dependent_vars) != 1:
        raise ModelError('Only scalar models can be used as constraints.')
    # self.params has to be a subset of model.params; adopt the full
    # parameter list of the dependency model so both line up.
    if set(instance.params) <= set(model.params):
        instance.params = model.params
    else:
        raise ModelError('The parameters of ``constraint`` have to be a ' 'subset of those of ``model``.')
    return instance
|
def inject(self):
    """inject code into sitecustomize.py that will inject pout into the builtins
    so it will be available globally"""
    if self.is_injected():
        return False
    snippet_lines = [
        "",
        "try:",
        " import pout",
        "except ImportError:",
        " pass",
        "else:",
        " pout.inject()",
        "",
    ]
    # Append-mode so any existing sitecustomize content is preserved.
    with open(self, mode="a+") as fp:
        fp.seek(0)
        fp.write("\n".join(snippet_lines))
    return True
|
def limit(self, value):
    """Cap the number of results this query returns. Useful for
    pagination."""
    limited = self._query.limit(value)
    self._query = limited
    return self
|
def message_to_objects(message: str, sender: str, sender_key_fetcher: Callable[[str], str] = None, user: UserType = None,) -> List:
    """Takes in a message extracted by a protocol and maps it to entities.

    :param message: XML payload
    :type message: str
    :param sender: Payload sender id
    :type sender: str
    :param sender_key_fetcher: Function to fetch sender public key. If not
        given, key will always be fetched over network. The function should
        take sender handle as the only parameter.
    :param user: Optional receiving user object. If given, should have a
        ``handle``.
    :returns: list of entities
    """
    document = etree.fromstring(message)
    if document.tag not in TAGS:
        # Unknown root tag: nothing we can map.
        return []
    return element_to_objects(document, sender, sender_key_fetcher, user)
|
def push(self, cart, env=None, callback=None):
    """Push the items in a release cart to the pre-release environment.

    `cart` - Release cart to push items from
    `callback` - Optional callback to call if juicer.utils.upload_rpm succeeds
    """
    juicer.utils.Log.log_debug("Initializing push of cart '%s'" % cart.cart_name)
    # Fall back to the configured starting environment.
    env = env or self._defaults['start_in']
    cart.current_env = env
    self.sign_cart_for_env_maybe(cart, env)
    self.upload(env, cart, callback)
    return True
|
def changelog(build):
    """create a changelog"""
    build.packages.install("gitchangelog")
    # gitchangelog emits bytes, so write the file in binary mode.
    text = subprocess.check_output(["gitchangelog", "HEAD...v0.2.9"])
    changelog_path = os.path.join(build.root, "CHANGELOG")
    with open(changelog_path, "wb+") as fh:
        fh.write(text)
|
def on_press(callback, suppress=False):
    """Invokes `callback` for every KEY_DOWN event. For details see `hook`."""
    def handler(event):
        # Key-up events short-circuit to True; only key-down events
        # reach the callback.
        return event.event_type == KEY_UP or callback(event)
    return hook(handler, suppress=suppress)
|
def pollNextEvent(self, pEvent):
    """Returns true and fills the event with the next event on the queue if
    there is one. If there are no events this method returns false.
    uncbVREvent should be the size in bytes of the VREvent_t struct."""
    poll = self.function_table.pollNextEvent
    return poll(byref(pEvent), sizeof(VREvent_t)) != 0
|
def parse_file_path(cls, file_path):
    """Parse a file address path without the file specifier.

    Returns the captured address, or None if the path does not match.
    """
    match = cls.file_regex.match(file_path)
    return match.group(1) if match else None
|
def __update_filter(self):
    """Create a combined filter. Set the resulting filter into the document controller."""
    # Only include the sub-filters that are currently set.
    active_filters = [f for f in (self.__date_filter, self.__text_filter) if f]
    self.document_controller.display_filter = ListModel.AndFilter(active_filters)
|
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs):
    """Update the BB models and the estimates"""
    idx = extra_info['stratum']
    # Fold the new observation into each beta-binomial model for this stratum.
    self._BB_TP.update(ell * ell_hat, idx)
    self._BB_PP.update(ell_hat, idx)
    self._BB_P.update(ell, idx)
    # Update model covariance matrix for this stratum
    self._update_cov_model(strata_to_update=[idx])
    # Update F-measure estimate, estimator variance, exp. variance decrease
    self._update_estimates()
|
def ensure_path_exists(self):  # type: (LocalDestinationPath) -> None
    """Ensure the destination path (or its parent directory) exists.

    :param LocalDestinationPath self: this
    """
    if self._is_dir is None:
        raise RuntimeError('is_dir not set')
    if self._is_dir:
        self._path.mkdir(mode=0o750, parents=True, exist_ok=True)
        return
    # Destination is a file: it must not collide with an existing directory.
    if self._path.exists() and self._path.is_dir():
        raise RuntimeError(
            ('destination path {} already exists and is a '
             'directory').format(self._path))
    # ensure parent path exists and is created
    self._path.parent.mkdir(mode=0o750, parents=True, exist_ok=True)
|
def generic_visit(self, node):
    """Handle expressions we don't have custom code for."""
    assert isinstance(node, ast.expr)
    result = self.assign(node)
    explanation = self.explanation_param(self.display(result))
    return result, explanation
|
def authenticate(self, client_id, client_secret, use_cache=True):
    """Authenticate the given client against UAA. The resulting token
    will be cached for reuse."""
    # Reuse a cached token as long as it hasn't expired.
    if use_cache:
        cached = self._get_client_from_cache(client_id)
        if cached and not self.is_expired_token(cached):
            self.authenticated = True
            self.client = cached
            return
    # Authenticate the client from scratch.
    client = {'id': client_id, 'secret': client_secret}
    client.update(self._authenticate_client(client_id, client_secret))
    expiry = datetime.datetime.now() + datetime.timedelta(seconds=client['expires_in'])
    client['expires'] = expiry.isoformat()
    # Cache it for repeated use until expired
    self._write_to_uaa_cache(client)
    self.client = client
    self.authenticated = True
|
def generate_empty_dicts(size):
    """Build a list of ``size`` independent empty dictionaries.

    Examples:
        generate_empty_dicts(5) -> [{}, {}, {}, {}, {}]

    :param size: Determines the number of empty dictionaries inside the list.
    :return: Returns a list of empty dictionaries based on the given size.
    """
    # Each entry is a distinct dict instance (never the same object shared).
    return [dict() for _ in range(size)]
|
def commit_offsets_async(self, offsets, callback=None):
    """Commit specific offsets asynchronously.

    Arguments:
        offsets (dict {TopicPartition: OffsetAndMetadata}): what to commit
        callback (callable, optional): called as callback(offsets, response)
            response will be either an Exception or a OffsetCommitResponse
            struct. This callback can be used to trigger custom actions when
            a commit request completes.

    Returns:
        kafka.future.Future
    """
    # Flush callbacks for any commits that completed since the last call.
    self._invoke_completed_offset_commit_callbacks()
    if not self.coordinator_unknown():
        future = self._do_commit_offsets_async(offsets, callback)
    else:  # we don't know the current coordinator, so try to find it and then
        # send the commit or fail (we don't want recursive retries which can
        # cause offset commits to arrive out of order). Note that there may
        # be multiple offset commits chained to the same coordinator lookup
        # request. This is fine because the listeners will be invoked in the
        # same order that they were added. Note also that BaseCoordinator
        # prevents multiple concurrent coordinator lookup requests.
        future = self.lookup_coordinator()
        future.add_callback(lambda r: functools.partial(self._do_commit_offsets_async, offsets, callback)())
        if callback:
            # Surface lookup failures through the normal callback path.
            future.add_errback(lambda e: self.completed_offset_commits.appendleft((callback, offsets, e)))
    # ensure the commit has a chance to be transmitted (without blocking on
    # its completion). Note that commits are treated as heartbeats by the
    # coordinator, so there is no need to explicitly allow heartbeats
    # through delayed task execution.
    self._client.poll(timeout_ms=0)
    # no wakeup if we add that feature
    return future
|
def fetch_messages(self):
    """Sends FetchRequests for all topic/partitions set for consumption.

    Returns:
        Generator that yields KafkaMessage structs
        after deserializing with the configured `deserializer_class`

    Note:
        Refreshes metadata on errors, and resets fetch offset on
        OffsetOutOfRange, per the configured `auto_offset_reset` policy

    See Also:
        Key KafkaConsumer configuration parameters:
        * `fetch_message_max_bytes`
        * `fetch_max_wait_ms`
        * `fetch_min_bytes`
        * `deserializer_class`
        * `auto_offset_reset`
    """
    max_bytes = self._config['fetch_message_max_bytes']
    max_wait_time = self._config['fetch_wait_max_ms']
    min_bytes = self._config['fetch_min_bytes']
    # Fail fast on misconfiguration before issuing any network requests.
    if not self._topics:
        raise KafkaConfigurationError('No topics or partitions configured')
    if not self._offsets.fetch:
        raise KafkaConfigurationError('No fetch offsets found when calling fetch_messages')
    # One FetchRequest per consumed (topic, partition), starting at the
    # currently tracked fetch offset.
    fetches = [FetchRequest(topic, partition, self._offsets.fetch[(topic, partition)], max_bytes) for (topic, partition) in self._topics]
    # send_fetch_request will batch topic/partition requests by leader
    responses = self._client.send_fetch_request(fetches, max_wait_time=max_wait_time, min_bytes=min_bytes, fail_on_error=False)
    for resp in responses:
        if isinstance(resp, FailedPayloadsError):
            logger.warning('FailedPayloadsError attempting to fetch data')
            self._refresh_metadata_on_error()
            continue
        topic = kafka_bytestring(resp.topic)
        partition = resp.partition
        try:
            check_error(resp)
        except OffsetOutOfRangeError:
            logger.warning('OffsetOutOfRange: topic %s, partition %d, ' 'offset %d (Highwatermark: %d)', topic, partition, self._offsets.fetch[(topic, partition)], resp.highwaterMark)
            # Reset offset
            self._offsets.fetch[(topic, partition)] = (self._reset_partition_offset((topic, partition)))
            continue
        except NotLeaderForPartitionError:
            logger.warning("NotLeaderForPartitionError for %s - %d. " "Metadata may be out of date", topic, partition)
            self._refresh_metadata_on_error()
            continue
        except RequestTimedOutError:
            logger.warning("RequestTimedOutError for %s - %d", topic, partition)
            continue
        # Track server highwater mark
        self._offsets.highwater[(topic, partition)] = resp.highwaterMark
        # Yield each message
        # Kafka-python could raise an exception during iteration
        # we are not catching -- user will need to address
        for (offset, message) in resp.messages:
            # deserializer_class could raise an exception here
            val = self._config['deserializer_class'](message.value)
            msg = KafkaMessage(topic, partition, offset, message.key, val)
            # in some cases the server will return earlier messages
            # than we requested. skip them per kafka spec
            if offset < self._offsets.fetch[(topic, partition)]:
                logger.debug('message offset less than fetched offset ' 'skipping: %s', msg)
                continue
            # Only increment fetch offset
            # if we safely got the message and deserialized
            self._offsets.fetch[(topic, partition)] = offset + 1
            # Then yield to user
            yield msg
|
def get_name_record(name, include_history=False, include_expired=False, include_grace=True, proxy=None, hostport=None, history_page=None):
    """Get the record for a name or a subdomain. Optionally include its
    history, and optionally return an expired name or a name in its grace
    period.

    Return the blockchain-extracted information on success.
    Return {'error': ...} on error.
    In particular, return {'error': 'Not found.'} if the name isn't registered.

    If include_expired is True, then a name record will be returned even if
    it expired.
    If include_expired is False, but include_grace is True, then the name
    record will be returned even if it is expired and in the grace period.
    """
    if isinstance(name, (str, unicode)):  # coerce string (py2 unicode accepted)
        name = str(name)
    assert proxy or hostport, 'Need either proxy handle or hostport string'
    if proxy is None:
        proxy = connect_hostport(hostport)
    # what do we expect?
    required = None
    is_blockstack_id = False
    is_blockstack_subdomain = False
    if is_name_valid(name):  # full name
        required = NAMEOP_SCHEMA_REQUIRED[:]
        is_blockstack_id = True
    elif is_subdomain(name):  # subdomain
        required = SUBDOMAIN_SCHEMA_REQUIRED[:]
        is_blockstack_subdomain = True
    else:  # invalid
        raise ValueError("Not a valid name or subdomain: {}".format(name))
    if include_history:
        required += ['history']
    # Build the JSON schema the server response must match.
    nameop_schema = {'type': 'object', 'properties': NAMEOP_SCHEMA_PROPERTIES, 'required': required}
    rec_schema = {'type': 'object', 'properties': {'record': nameop_schema, }, 'required': ['record'], }
    resp_schema = json_response_schema(rec_schema)
    resp = {}
    lastblock = None
    try:
        if include_history:
            resp = get_name_and_history(name, proxy=proxy, history_page=history_page)
            if 'error' in resp:  # fall back to legacy path
                log.debug(resp)
                resp = proxy.get_name_blockchain_record(name)
        else:
            resp = proxy.get_name_record(name)
        resp = json_validate(resp_schema, resp)
        if json_is_error(resp):
            if resp['error'] == 'Not found.':
                return {'error': 'Not found.', 'http_status': resp.get('http_status', 404)}
            return resp
        lastblock = resp['lastblock']
    except socket.timeout:
        log.error("Connection timed out")
        resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
        return resp
    except socket.error as se:
        log.error("Connection error {}".format(se.errno))
        resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
        return resp
    except ValidationError as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)
        resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
        return resp
    except Exception as ee:
        if BLOCKSTACK_DEBUG:
            log.exception(ee)
        log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
        resp = {'error': 'Failed to contact Blockstack node. Try again with `--debug`.', 'http_status': 500}
        return resp
    if not include_expired and is_blockstack_id:  # check expired
        if lastblock is None:
            return {'error': 'No lastblock given from server', 'http_status': 503}
        if include_grace:  # only care if the name is beyond the grace period
            if lastblock >= int(resp['record']['renewal_deadline']) and int(resp['record']['renewal_deadline']) > 0:
                return {'error': 'Name expired', 'http_status': 404}
            elif int(resp['record']['renewal_deadline']) > 0 and lastblock >= int(resp['record']['expire_block']) and lastblock < int(resp['record']['renewal_deadline']):
                # expired, but still within the grace window
                resp['record']['grace_period'] = True
            else:
                resp['record']['grace_period'] = False
        else:  # only care about expired, even if it's in the grace period
            if lastblock > int(resp['record']['expire_block']) and int(resp['record']['expire_block']) > 0:
                return {'error': 'Name expired', 'http_status': 404}
    return resp['record']
|
def _einsum_equation ( input_shapes , output_shape ) :
"""Turn shapes into an einsum equation .
e . g . " ij , jk - > ik "
Args :
input _ shapes : a list of Shapes
output _ shape : a Shape
Returns :
a string"""
|
ret = [ ]
next_letter = ord ( "a" )
dim_to_letter = { }
for shape_num , shape in enumerate ( input_shapes + [ output_shape ] ) :
if shape_num == len ( input_shapes ) :
ret . append ( "->" )
elif shape_num > 0 :
ret . append ( "," )
for d in shape . dims :
if d not in dim_to_letter :
dim_to_letter [ d ] = chr ( next_letter )
next_letter += 1
ret . append ( dim_to_letter [ d ] )
return "" . join ( ret )
|
def main():
    """NAME
        vector_mean.py

    DESCRIPTION
        calculates vector mean of vector data

    INPUT FORMAT
        takes dec, inc, int from an input file

    SYNTAX
        vector_mean.py [command line options] [< filename]

    OPTIONS
        -h prints help message and quits
        -f FILE, specify input file
        -F FILE, specify output file
        < filename for reading from standard input

    OUTPUT
        mean dec, mean inc, R, N
    """
    if '-h' in sys.argv:  # check if help is needed
        print(main.__doc__)
        sys.exit()  # graceful quit
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
    else:
        file = sys.stdin  # read from standard input
    ofile = ""
    out = None
    if '-F' in sys.argv:
        ind = sys.argv.index('-F')
        ofile = sys.argv[ind + 1]
        # BUG FIX: the original used open(ofile, 'w + a'), an invalid mode
        # string that raises ValueError at runtime; 'w' (create/truncate)
        # is the intended mode for a single write.
        out = open(ofile, 'w')
    # numpy.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement.
    DIIs = numpy.loadtxt(file, dtype=float)  # read in the data
    vpars, R = pmag.vector_mean(DIIs)
    outstring = '%7.1f %7.1f %10.3e %i' % (vpars[0], vpars[1], R, len(DIIs))
    if ofile == "":
        print(outstring)
    else:
        out.write(outstring + "\n")
        out.close()  # release the file handle once the result is written
|
def do_REMOTE(self, target: str, remote_command: str, source: list, *args, **kwargs) -> None:
    """Send a remote command to a service.

    Args:
        target: The service the command gets sent to.
        remote_command: The command to perform remotely.
        source: the binary source of the zmq_socket; the resolved target
            address is prepended to it before the response is sent.
    """
    if target == self.messaging._service_name:
        # Remote target is this bot itself; handle the command locally.
        info = 'target for remote command is the bot itself! Returning the function'
        self.logger.info(info)
        return self._handle_command(remote_command, source, *args, **kwargs)
    try:
        target = self.messaging._address_map[target]
    except KeyError:
        warn = ' Target %s, not found in addresses. Are you sure that %s sent an IDENT message?'
        # FIX: Logger.warn is a deprecated alias of Logger.warning.
        self.logger.warning(warn, target, target)
        # TODO: raise an error instead of returning?
        # NOTE: Bail here since there's no point in going forward
        return
    self.logger.info(' REMOTE %s, target: %s | %s, %s', remote_command, target, args, kwargs)
    # package the binary together
    source = target + source
    self.messaging.send_command_response(source, remote_command, *args, **kwargs)
|
def clear_other_texts(self, remove=False):
    """Make sure that no other text is at the same position as this one.

    This method clears all text instances in the figure that are at the
    same position as the :attr:`_text` attribute.

    Parameters
    ----------
    remove : bool
        If True, the Text instances are permanently deleted from the
        figure, otherwise their text is simply set to ''
    """
    fig = self.ax.get_figure()
    # don't do anything if our figtitle is the only Text instance
    if len(fig.texts) == 1:
        return
    # Iterate over a copy: removal below mutates fig.texts.
    for text in list(fig.texts):
        if text == self._text:
            continue
        if text.get_position() == self._text.get_position():
            if remove:
                # BUG FIX: the original did `del fig[i]`, which raises
                # TypeError (Figure does not support item deletion) and
                # also mutated fig.texts while iterating over it.
                # Artist.remove() detaches the text from the figure.
                text.remove()
            else:
                text.set_text('')
|
def iter_points(self):
    "returns a list of tuples of names and values"
    if not self.is_discrete():
        raise ValueError("Patch is not discrete")
    # Deterministic ordering of set names, then the cartesian product of
    # each set's members.
    sorted_names = sorted(self.sets.keys())
    member_iters = [self.sets[n].iter_members() for n in sorted_names]
    for combo in product(*member_iters):
        yield tuple(zip(sorted_names, combo))
|
def decrypt(receiver_prvhex: str, msg: bytes) -> bytes:
    """Decrypt with eth private key

    Parameters
    ----------
    receiver_prvhex: str
        Receiver's ethereum private key hex string
    msg: bytes
        Data to decrypt

    Returns
    -------
    bytes
        Plain text
    """
    # Layout: the first 65 bytes carry the sender's uncompressed public
    # key; the remainder is the AES ciphertext.
    sender_pub_bytes = msg[0:65]
    ciphertext = msg[65:]
    sender_public_key = hex2pub(sender_pub_bytes.hex())
    private_key = hex2prv(receiver_prvhex)
    aes_key = derive(private_key, sender_public_key)
    return aes_decrypt(aes_key, ciphertext)
|
def parse_condition(self, query, prev_key=None, last_prev_key=None):
    """Creates a recursive generator for parsing some types of Query()
    conditions.

    :param query: Query object (a mongo-style condition mapping)
    :param prev_key: The key at the next-higher level
    :param last_prev_key: The key two levels up; used to invert a
        comparison when nested under $not
    :return: generator object, the last of which will be the complete
        Query() object containing all conditions
    """
    # use this to determine gt/lt/eq on prev_query
    logger.debug(u'query: {} prev_query: {}'.format(query, prev_key))
    q = Query()
    conditions = None
    # deal with the {'name': value} case by injecting a previous key
    if not prev_key:
        temp_query = copy.deepcopy(query)
        k, v = temp_query.popitem()
        prev_key = k
    # deal with the conditions
    for key, value in query.items():
        logger.debug(u'conditions: {} {}'.format(key, value))
        # Each comparison operator AND-combines with what has accumulated
        # so far; when nested under $not the comparison is inverted and
        # applied to last_prev_key (the key above the $not).
        if key == u'$gte':
            conditions = (Q(q, prev_key) >= value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) >= value)) if prev_key != "$not" else (q[last_prev_key] < value)
        elif key == u'$gt':
            conditions = (Q(q, prev_key) > value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) > value)) if prev_key != "$not" else (q[last_prev_key] <= value)
        elif key == u'$lte':
            conditions = (Q(q, prev_key) <= value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) <= value)) if prev_key != "$not" else (q[last_prev_key] > value)
        elif key == u'$lt':
            conditions = (Q(q, prev_key) < value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) < value)) if prev_key != "$not" else (q[last_prev_key] >= value)
        elif key == u'$ne':
            conditions = (Q(q, prev_key) != value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) != value)) if prev_key != "$not" else (q[last_prev_key] == value)
        elif key == u'$not':
            if not isinstance(value, dict) and not isinstance(value, list):
                conditions = (Q(q, prev_key) != value) if not conditions and prev_key != "$not" else (conditions & (Q(q, prev_key) != value)) if prev_key != "$not" else (q[last_prev_key] >= value)
            else:  # let the value's condition be parsed below
                pass
        elif key == u'$regex':
            # Protect escaped backslashes with a placeholder, strip the
            # remaining single backslashes, then restore the placeholder.
            value = value.replace('\\\\\\', '|||')
            value = value.replace('\\\\', '|||')
            regex = value.replace('\\', '')
            regex = regex.replace('|||', '\\')
            currCond = (where(prev_key).matches(regex))
            conditions = currCond if not conditions else (conditions & currCond)
        elif key in ['$and', '$or', '$in', '$all']:
            # handled below, once the list value is dispatched on
            pass
        else:  # don't want to use the previous key if this is a secondary key
            # (fixes multiple item query that includes $ codes)
            if not isinstance(value, dict) and not isinstance(value, list):
                conditions = ((Q(q, key) == value) | (Q(q, key).any([value]))) if not conditions else (conditions & ((Q(q, key) == value) | (Q(q, key).any([value]))))
            prev_key = key
        logger.debug(u'c: {}'.format(conditions))
        if isinstance(value, dict):  # yield from self.parse_condition(value, key)
            for parse_condition in self.parse_condition(value, key, prev_key):
                yield parse_condition
        elif isinstance(value, list):
            if key == '$and':
                grouped_conditions = None
                for spec in value:
                    for parse_condition in self.parse_condition(spec):
                        grouped_conditions = (parse_condition if not grouped_conditions else grouped_conditions & parse_condition)
                yield grouped_conditions
            elif key == '$or':
                grouped_conditions = None
                for spec in value:
                    for parse_condition in self.parse_condition(spec):
                        grouped_conditions = (parse_condition if not grouped_conditions else grouped_conditions | parse_condition)
                yield grouped_conditions
            elif key == '$in':  # use `any` to find with list, before comparing to single string
                grouped_conditions = Q(q, prev_key).any(value)
                for val in value:
                    for parse_condition in self.parse_condition({prev_key: val}):
                        grouped_conditions = (parse_condition if not grouped_conditions else grouped_conditions | parse_condition)
                yield grouped_conditions
            elif key == '$all':
                yield Q(q, prev_key).all(value)
            else:
                yield Q(q, prev_key).any([value])
        else:
            yield conditions
|
def format_epilog(self, ctx, formatter):
    """Write the epilog into *formatter*, if one is set.

    The epilog is emitted as its own paragraph, indented one level.
    ``ctx`` is accepted for interface compatibility but is not used here.
    """
    epilog = self.epilog
    if not epilog:
        return
    formatter.write_paragraph()
    with formatter.indentation():
        formatter.write_text(epilog)
|
def register_run_plugins(self, plugin_name, plugin_class):
    """Register a run-level plugin and attach its parts to Icetea run globals.

    :param plugin_name: Name of the plugin
    :param plugin_class: PluginBase describing the plugin
    :raises PluginException: if a plugin with this name is already registered
    :return: Nothing
    """
    if plugin_name in self.registered_plugins:
        raise PluginException("Plugin {} already registered! "
                              "Duplicate plugins?".format(plugin_name))
    self.logger.debug("Registering plugin %s", plugin_name)
    # Only plugins that provide allocators need a registration hook call.
    if plugin_class.get_allocators():
        self.plugin_types[PluginTypes.ALLOCATOR](plugin_name, plugin_class)
    self.registered_plugins.append(plugin_name)
|
async def ignore_list(self, ctx):
    """Tell the user which channels are currently ignored in this server."""
    ignored = self.config.get('ignored', [])
    # Only mention ignored channels that still exist on this server.
    server_channel_ids = {channel.id for channel in ctx.message.server.channels}
    mentions = ['<#{}>'.format(cid) for cid in ignored if cid in server_channel_ids]
    if mentions:
        await self.bot.responses.basic(title="Ignored Channels:",
                                       message='\n\n{}'.format(', '.join(mentions)))
    else:
        await self.bot.responses.failure(message='I am not ignoring any channels here.')
|
def get_pymata_version(self):
    """Ask the PyMata core for its version number.

    The underlying coroutine is scheduled on the instance's event loop and
    run to completion; the method itself returns None.
    """
    version_task = asyncio.ensure_future(self.core.get_pymata_version())
    self.loop.run_until_complete(version_task)
|
def pop(self):
    """Pop the most recently pushed directory off the stack and chdir to it.

    Does nothing when the stack is empty.
    """
    if not self.stack:
        return
    os.chdir(self.stack.pop())
|
def _fit(self, col):
    """Build an empirical-probability map for each category in the column.

    For every distinct value the map stores ``(interval, mean, std)``, where
    ``interval`` is the value's slice of [0, 1] sized by its relative
    frequency, ``mean`` is the interval midpoint and ``std`` is prob / 6.

    Args:
        col (pandas.DataFrame): Data to transform.
    """
    # NaN is mapped to inf so missing values group as a single category;
    # the inf index label is then renamed back to None in the counts.
    series = col[self.col_name].replace({np.nan: np.inf})
    counts = series.groupby(series).count().rename({np.inf: None}).to_dict()
    total = len(col)
    # Assign each category a consecutive sub-interval of [0, 1].
    start = 0
    for category, count in counts.items():
        prob = count / total
        end = start + prob
        interval = (start, end)
        self.probability_map[category] = (interval, np.mean(interval), prob / 6)
        start = end
|
def timeout_at(clock, coro=None, *args):
    '''Execute *coro* with a cancellation deadline at absolute time *clock*.

    When the deadline passes, a cancellation request is issued to the
    calling task and a TaskTimeout exception is raised.  If *coro* is None,
    the result of this function serves as an asynchronous context manager
    that applies the deadline to a block of statements.

    Deadlines may be nested.  If an outer deadline expires first,
    TimeoutCancellationError is raised instead of TaskTimeout.  If an inner
    deadline expires and fails to properly handle TaskTimeout, an
    UncaughtTimeoutError is raised in the outer timeout.
    '''
    if not coro:
        return TimeoutAfter(clock, absolute=True)
    return _timeout_after_func(clock, True, coro, args)
|
def makeAggShkHist(self):
    '''Make simulated histories of aggregate transitory and permanent shocks.

    Histories are of length self.act_T, for use in the general equilibrium
    simulation.  Draws on the history of aggregate Markov states generated
    by an internal call to makeMrkvHist().

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.makeMrkvHist()  # Make a (pseudo)random sequence of Markov states
    sim_periods = self.act_T
    # For each Markov state, pre-draw a full sim_periods-long history of the
    # aggregate shocks that would occur in that state in each period.
    StateCount = self.MrkvArray.shape[0]
    PermShkAggHistAll = np.zeros((StateCount, sim_periods))
    TranShkAggHistAll = np.zeros((StateCount, sim_periods))
    for i in range(StateCount):
        Events = np.arange(self.AggShkDstn[i][0].size)
        # just a list of integers indexing the discrete outcomes;
        # AggShkDstn[i][0] holds the outcome probabilities (used as P below).
        EventDraws = drawDiscrete(N=sim_periods, P=self.AggShkDstn[i][0], X=Events, seed=0)
        # AggShkDstn[i][1] / [2] hold the permanent / transitory shock values.
        PermShkAggHistAll[i, :] = self.AggShkDstn[i][1][EventDraws]
        TranShkAggHistAll[i, :] = self.AggShkDstn[i][2][EventDraws]
    # Select the actual history of aggregate shocks based on the sequence
    # of Markov states that the economy experiences
    PermShkAggHist = np.zeros(sim_periods)
    TranShkAggHist = np.zeros(sim_periods)
    for i in range(StateCount):
        these = i == self.MrkvNow_hist  # boolean mask: periods spent in state i
        # Permanent shocks are scaled by the state's aggregate growth factor.
        PermShkAggHist[these] = PermShkAggHistAll[i, these] * self.PermGroFacAgg[i]
        TranShkAggHist[these] = TranShkAggHistAll[i, these]
    # Store the histories
    self.PermShkAggHist = PermShkAggHist
    self.TranShkAggHist = TranShkAggHist
|
def unique_index(df):
    """Assert that the DataFrame's index has no duplicate labels.

    Parameters
    ----------
    df : DataFrame

    Returns
    -------
    df : DataFrame
        The input, unchanged, so the check can be used inside a pipe chain.

    Raises
    ------
    AssertionError
        If the index contains duplicates; the duplicated labels are attached
        as the exception's args.
    """
    try:
        assert df.index.is_unique
    except AssertionError as e:
        # Index.get_duplicates() was removed in pandas 0.25; derive the
        # duplicated labels with the supported duplicated() API instead.
        e.args = tuple(df.index[df.index.duplicated()].unique())
        raise
    return df
|
def my_log_message(verbose, prio, msg):
    """Log *msg* to syslog at priority *prio*; errors also go to stderr.

    The message is echoed to stderr when *verbose* is set or when the
    priority is syslog.LOG_ERR.
    """
    syslog.syslog(prio, msg)
    echo_to_stderr = verbose or prio == syslog.LOG_ERR
    if echo_to_stderr:
        sys.stderr.write("%s\n" % (msg))
|
def __version(client):
    '''Determine the DRAC/iDRAC generation by running racadm over SSH.

    Returns the product name mapped from the idRacType code, or None when
    the client is not an SSH client or the type cannot be determined.
    '''
    # idRacType code -> product generation.
    versions = {9: 'CMC', 8: 'iDRAC6', 10: 'iDRAC6', 11: 'iDRAC6',
                16: 'iDRAC7', 17: 'iDRAC7'}
    if isinstance(client, paramiko.SSHClient):
        stdin, stdout, stderr = client.exec_command('racadm getconfig -g idRacInfo')
        for line in stdout.readlines():
            # Output lines carry a two-character prefix; strip it before
            # looking for the idRacType key=value pair.
            if line[2:].startswith('idRacType'):
                return versions.get(int(line[2:].split('=')[1]), None)
    return None
|
def to_dict(self):
    """Convert the tree node to its dictionary representation.

    :return: an expansion dictionary mapping this node's type to the flat
        list of strings produced by its expansions.
    :rtype: dict[list[union[str, unicode]]]
    """
    strings = [s
               for expansion in self.expansions
               for s in expansion.to_strings()]
    return {self.type: strings}
|
def toLocalIterator(self):
    """Return an iterator over all rows of this :class:`DataFrame`.

    The iterator consumes at most as much memory as the largest partition
    of this DataFrame.

    >>> list(df.toLocalIterator())
    [Row(age=2, name=u'Alice'), Row(age=5, name=u'Bob')]
    """
    with SCCallSiteSync(self._sc) as css:
        socket_info = self._jdf.toPythonIterator()
    return _load_from_socket(socket_info, BatchedSerializer(PickleSerializer()))
|
def remove_action(i):
    """Remove an action (function) reference from a module's meta description.

    Only the reference in the module meta is deleted; the function body is
    left untouched in the module's python code.

    Input:  {
              (repo_uoa)  - repo UOA
              module_uoa  - normally should be 'module' already
              data_uoa    - UOA of the module to be updated
              func        - action to remove
              (out)       - output mode ('con' for console)
            }

    Output: {
              return      - return code = 0, if successful
                                        > 0, if error
              (error)     - error text if return > 0
              Output of 'update' function
            }
    """
    # Check if global writing is allowed
    r = check_writing({})
    if r['return'] > 0:
        return r
    o = i.get('out', '')
    ruoa = i.get('repo_uoa', '')
    muoa = i.get('module_uoa', '')
    duoa = i.get('data_uoa', '')
    func = i.get('func', '')
    if muoa == '':
        return {'return': 1, 'error': 'module UOA is not defined'}
    # The module to edit may arrive as data_uoa; prefer it when present.
    if duoa != '':
        muoa = duoa
        duoa = ''
    # Find path to module
    ii = {'module_uoa': cfg['module_name'], 'data_uoa': muoa}
    if ruoa != '':
        ii['repo_uoa'] = ruoa
    r = load(ii)
    if r['return'] > 0:
        return r
    pp = r['path']   # path of the module entry (kept for parity with update())
    dd = r['dict']   # module meta dictionary
    actions = dd.get('actions', {})
    # Check func and desc: on the console, prompt for the function name
    # when it was not supplied.
    if o == 'con':
        if func == '':
            r = inp({'text': 'Enter function to be removed (or Enter to quit) - note that we remove only reference to this function from the module meta: '})
            func = r['string']
    # Check if empty
    if func == '':
        return {'return': 1, 'error': 'action (function) is not defined'}
    if func not in actions:
        return {'return': 1, 'error': 'action (function) is not found in the module'}
    del (actions[func])
    dd['actions'] = actions
    # Update data entry
    if o == 'con':
        out('')
    ii = {'module_uoa': cfg['module_name'], 'data_uoa': muoa, 'dict': dd, 'substitute': 'yes', 'sort_keys': 'yes', 'out': o}
    if ruoa != '':
        ii['repo_uoa'] = ruoa
    r = update(ii)
    if r['return'] > 0:
        return r
    if o == 'con':
        out('')
        out('Reference to the function "' + func + '" was removed from module meta. Function body was not removed from the python code')
    return r
|
def upload_bam_to_s3(job, job_vars):
    """Upload the sample's BAM to S3 via S3AM.

    Requires the ``s3am`` tool and a ``~/.boto`` config file.
    """
    input_args, ids = job_vars
    work_dir = job.fileStore.getLocalTempDir()
    uuid = input_args['uuid']
    # I/O: materialise the BAM from the job's global file store.
    bam_path = os.path.join(work_dir, 'alignments.bam')
    job.fileStore.readGlobalFile(ids['alignments.bam'], bam_path)
    sample_name = uuid + '.bam'
    # Parse s3_dir into bucket name and key prefix.
    s3_dir = input_args['s3_dir']
    bucket_name = s3_dir.split('/')[0]
    bucket_dir = os.path.join('/'.join(s3_dir.split('/')[1:]), 'bam_files')
    # Upload to S3 via S3AM.
    subprocess.check_call(['s3am', 'upload',
                           'file://{}'.format(bam_path),
                           os.path.join('s3://', bucket_name, bucket_dir, sample_name)])
|
def block(self, **kwargs):
    """Block the user.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabBlockError: If the user could not be blocked

    Returns:
        bool: Whether the user status has been changed
    """
    server_data = self.manager.gitlab.http_post('/users/%s/block' % self.id, **kwargs)
    # The API answers True on success; mirror the new state locally.
    if server_data is True:
        self._attrs['state'] = 'blocked'
    return server_data
|
def _gen_sample(self):
    """Generate one random captcha image sample.

    Returns
    -------
    (numpy.ndarray, str)
        The rendered captcha image and the digit string it encodes.
    """
    digits = self.get_rand(self.num_digit_min, self.num_digit_max)
    image = self.captcha.image(digits)
    return image, digits
|
def ip_info(self, ip, repository_ids=None):
    """Return information about *ip* within the specified repositories.

    :param ip: IP address to look up.
    :param repository_ids: iterable of repository ids to search; treated as
        empty when None.
    """
    repos = [{'id': rid} for rid in (repository_ids or [])]
    return self.raw_query('vuln', 'getIP', data={'ip': ip, 'repositories': repos})
|
def get_process_definition_start(fname, slug):
    """Find the first line of a process definition.

    The first line of a process definition is the line with its slug.

    :param str fname: Path to the file with process definitions.
    :param str slug: Process slug to look for.
    :return: line where the process definition starts (1-based).
    :rtype: int
    """
    # Escape the slug so regex metacharacters in it are matched literally
    # instead of being interpreted as patterns.
    pattern = re.compile(r'slug:\s*{}'.format(re.escape(slug)))
    with open(fname) as file_:
        for i, line in enumerate(file_):
            if pattern.search(line):
                return i + 1
    # In case the starting line is not found just return the first line.
    return 1
|
def execPath(self):
    """Path to the executable application.

    Executables in the Versions folder are stored by baseVersion (which may
    be modified by game data patches), so the version label — when a
    version is known — is forwarded to the installed app.
    """
    version_label = self.version.label if self.version else None
    return self.installedApp.exec_path(version_label)
|
def _create_producer(self):
    """Try to establish a Kafka producer connection.

    Returns the new KafkaProducer on success, or None when the instance is
    already closed or a required setting is missing.  Any other failure is
    logged and re-raised.
    """
    if not self.closed:
        try:
            self.logger.debug("Creating new kafka producer using brokers: " +
                              str(self.settings['KAFKA_HOSTS']))
            return KafkaProducer(bootstrap_servers=self.settings['KAFKA_HOSTS'],
                                 value_serializer=lambda v: json.dumps(v).encode('utf-8'),
                                 retries=3,
                                 linger_ms=self.settings['KAFKA_PRODUCER_BATCH_LINGER_MS'],
                                 buffer_memory=self.settings['KAFKA_PRODUCER_BUFFER_BYTES'])
        except KeyError as e:
            # Missing configuration key: log it and fall through (the
            # method then implicitly returns None).
            self.logger.error('Missing setting named ' + str(e),
                              {'ex': traceback.format_exc()})
        except:
            # Any other connection error is fatal for the caller: log with
            # the traceback, then re-raise.
            self.logger.error("Couldn't initialize kafka producer.",
                              {'ex': traceback.format_exc()})
            raise
|
def redraw(self, whence=0):
    """Redraw the canvas, possibly deferring the actual draw.

    Parameters
    ----------
    whence
        See :meth:`get_rgb_object`.  When several redraw requests are
        pending, the smallest requested whence value wins.
    """
    with self._defer_lock:
        # Merge this request with any pending one: keep the smallest
        # (most thorough) whence level seen so far.
        whence = min(self._defer_whence, whence)
        if not self.defer_redraw:
            # Deferred drawing is disabled: redraw synchronously unless a
            # hold is in effect, in which case only record the request.
            if self._hold_redraw_cnt == 0:
                self._defer_whence = self._defer_whence_reset
                self.redraw_now(whence=whence)
            else:
                self._defer_whence = whence
            return
        elapsed = time.time() - self.time_last_redraw
        # If there is no redraw scheduled, or we are overdue for one:
        if (not self._defer_flag) or (elapsed > self.defer_lagtime):
            # If more time than defer_lagtime has passed since the
            # last redraw then just do the redraw immediately
            if elapsed > self.defer_lagtime:
                if self._hold_redraw_cnt > 0:
                    # A hold is active: just record the request.
                    # self._defer_flag = True
                    self._defer_whence = whence
                    return
                self._defer_whence = self._defer_whence_reset
                self.logger.debug("lagtime expired--forced redraw")
                self.redraw_now(whence=whence)
                return
            # Indicate that a redraw is necessary and record whence
            self._defer_flag = True
            self._defer_whence = whence
            # schedule a redraw by the end of the defer_lagtime
            secs = self.defer_lagtime - elapsed
            self.logger.debug("defer redraw (whence=%.2f) in %.f sec" % (whence, secs))
            self.reschedule_redraw(secs)
        else:
            # A redraw is already scheduled.  Just record whence.
            self._defer_whence = whence
            self.logger.debug("update whence=%.2f" % (whence))
|
def draw_progress_bar(cb, message, value, max_value):
    """Draw a boxed progress bar centred on the screen.

    :type cb: cursebox.Cursebox
    """
    mid_x = cb.width // 2
    mid_y = cb.height // 2
    box_width = len(message) + 4
    box_height = 3
    draw_box(cb, mid_x - box_width // 2, mid_y - 1, box_width, box_height)
    padded = " %s " % message
    # '$' markers split the text into its filled and unfilled portions.
    split_at = int((value / max_value) * (len(padded) + 2))
    marked = "$" + padded[:split_at] + "$" + padded[split_at:]
    draw_text(cb, mid_x - box_width // 2 + 1, mid_y, marked)
|
def show_ver(**kwargs):
    '''Shortcut to run `show version` on the NX-OS device.

    .. code-block:: bash

        salt '*' nxos.cmd show_ver
    '''
    info = show('show version', **kwargs)
    # show() may return a list of outputs; keep only the first entry.
    if isinstance(info, list):
        info = info[0]
    return info
|
def name_history_merge(h1, h2):
    """Merge two name histories (dicts grouped by block number as strings).

    For blocks present in both histories the entries are concatenated,
    sorted by 'vtxindex', and de-duplicated (keeping the first entry seen
    for each vtxindex).  Blocks present in only one history are copied
    through unchanged.

    :param h1: first history, {str(block): [entry, ...]}
    :param h2: second history, same shape
    :return: merged history dict
    """
    ret = {}
    blocks_1 = [int(b) for b in h1.keys()]
    blocks_2 = [int(b) for b in h2.keys()]
    # find overlapping blocks
    overlap = list(set(blocks_1).intersection(set(blocks_2)))
    if len(overlap) > 0:
        for b in overlap:
            h = h1[str(b)] + h2[str(b)]
            # Python 3 removed cmp-function sorting; use a key function for
            # the same ascending ordering by 'vtxindex'.
            h.sort(key=lambda v: v['vtxindex'])
            uniq = []
            last_vtxindex = None
            for i in range(0, len(h)):
                if h[i]['vtxindex'] != last_vtxindex:
                    uniq.append(h[i])
                    last_vtxindex = h[i]['vtxindex']
            ret[str(b)] = uniq
    all_blocks = list(set(blocks_1 + blocks_2))
    for b in all_blocks:
        if b in overlap:
            continue
        if b in blocks_1:
            ret[str(b)] = h1[str(b)]
        else:
            ret[str(b)] = h2[str(b)]
    return ret
|
def convert(self, obj):
    """Convert a honeybadgerfish JSON blob of the 1.0.* type to the
    BY_ID_HONEY_BADGERFISH version.  The object is modified in place
    and returned.
    """
    if self.pristine_if_invalid:
        raise NotImplementedError('pristine_if_invalid option is not supported yet')
    nex = get_nexml_el(obj)
    assert nex
    self._recursive_convert_dict(nex)
    nex['@nexml2json'] = str(BADGER_FISH_NEXSON_VERSION)
    self._single_el_list_to_dicts(nex, 'otus')
    self._single_el_list_to_dicts(nex, 'trees')
    # otu and tree are always arrays in phylografter, so the deeper
    # de-pluralization below is intentionally disabled.
    emulate_phylografter_pluralization = True
    if not emulate_phylografter_pluralization:
        self._single_el_list_to_dicts(nex, 'otus', 'otu')
        self._single_el_list_to_dicts(nex, 'trees', 'tree')
        self._single_el_list_to_dicts(nex, 'trees', 'tree', 'node')
        self._single_el_list_to_dicts(nex, 'trees', 'tree', 'edge')
    return obj
|
def mod2pi(ts):
    """Wrap a timeseries of phase angles (radians) into the range (-pi, pi].

    Every value is replaced by the equivalent angle in (-pi, pi].
    """
    two_pi = 2 * np.pi
    return np.pi - np.mod(np.pi - ts, two_pi)
|
def extract_spans(html_string):
    """Create a list of the spanned cell groups of [row, column] pairs.

    Each cell with a rowspan/colspan contributes one group listing every
    [row, column] grid position it covers.

    Parameters
    ----------
    html_string : str
        HTML text containing a table.

    Returns
    -------
    list of lists of lists of int
    """
    try:
        from bs4 import BeautifulSoup
    except ImportError:
        print("ERROR: You must have BeautifulSoup to use html2data")
        return
    soup = BeautifulSoup(html_string, 'html.parser')
    table = soup.find('table')
    if not table:
        return []
    trs = table.findAll('tr')
    if len(trs) == 0:
        return []
    spans = []
    for tr in range(len(trs)):
        if tr == 0:
            # First row: prefer <th> header cells, fall back to <td>.
            ths = trs[tr].findAll('th')
            if len(ths) == 0:
                ths = trs[tr].findAll('td')
            tds = ths
        else:
            tds = trs[tr].findAll('td')
        column = 0
        for td in tds:
            r_span_count = 1
            c_span_count = 1
            current_column = column
            if td.has_attr('rowspan'):
                r_span_count = int(td['rowspan'])
            if td.has_attr('colspan'):
                c_span_count = int(td['colspan'])
                column += c_span_count
            else:
                column += 1
            # Collect every grid cell this (possibly spanned) cell covers
            # that is not already claimed by a previously recorded span.
            new_span = []
            for r_index in range(tr, tr + r_span_count):
                for c_index in range(current_column, column):
                    if not get_span(spans, r_index, c_index):
                        new_span.append([r_index, c_index])
            if len(new_span) > 0:
                spans.append(new_span)
    return spans
|
def get_headers(environ):
    """Yield only proper HTTP headers from a WSGI environ.

    ``HTTP_*`` keys are translated to header names (``HTTP_X_FOO`` ->
    ``X-Foo``), except the content headers, which WSGI exposes without the
    prefix (``CONTENT_TYPE`` / ``CONTENT_LENGTH``).

    :param environ: WSGI environ mapping
    :return: generator of (header_name, value) pairs
    """
    # dict.iteritems() is Python-2-only; items() works on both 2 and 3.
    for key, value in environ.items():
        key = str(key)
        if key.startswith('HTTP_') and key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
            yield key[5:].replace('_', '-').title(), value
        elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
            yield key.replace('_', '-').title(), value
|
def move_up(self):
    """Move up one level in the hierarchy, unless already at the top.

    Fires the registered "up" hooks after moving, plus the "top" hooks when
    the move lands on the root item.  Returns self for chaining.
    """
    parent = self.current_item.parent
    if parent is not None:
        self.current_item = parent
        for hook in self._hooks["up"]:
            hook(self)
        if self.current_item is self.root:
            for hook in self._hooks["top"]:
                hook(self)
    return self
|
def create_result(self, local_path, container_path, permissions, meta, val, dividers):
    """Build a Mount for this spec; permissions default to 'rw'.

    The meta, val and dividers arguments are accepted for interface
    compatibility but are not used here.
    """
    perms = 'rw' if permissions is NotSpecified else permissions
    return Mount(local_path, container_path, perms)
|
def verify_ocsp(cls, certificate, issuer):
    """Run OCSP verification; returns an error code where 0 means success."""
    verifier = OCSPVerifier(certificate, issuer, cls.get_ocsp_url(),
                            cls.get_ocsp_responder_certificate_path())
    return verifier.verify()
|
def get_batches(self, batch_size, shuffle=True):
    """Get a batch iterator over the bucketed data.

    Parameters
    ----------
    batch_size : int
        Approximate number of tokens per batch.
    shuffle : bool
        Whether to shuffle batches.  Don't set to True when evaluating on
        dev or test set.

    Yields
    ------
    tuple
        word_inputs, tag_inputs, arc_targets, rel_targets
    """
    batches = []
    for bkt_idx, bucket in enumerate(self._buckets):
        bucket_size = bucket.shape[1]
        # Split each bucket so that one batch holds roughly batch_size
        # tokens (but at least one sentence, at most one batch per sentence).
        n_tokens = bucket_size * self._bucket_lengths[bkt_idx]
        n_splits = min(max(n_tokens // batch_size, 1), bucket_size)
        # Shuffling permutes the sentence indices within each bucket.
        range_func = np.random.permutation if shuffle else np.arange
        for bkt_batch in np.array_split(range_func(bucket_size), n_splits):
            batches.append((bkt_idx, bkt_batch))
    if shuffle:
        np.random.shuffle(batches)
    for bkt_idx, bkt_batch in batches:
        word_inputs = self._buckets[bkt_idx][:, bkt_batch, 0]
        # word_id x sent_id
        tag_inputs = self._buckets[bkt_idx][:, bkt_batch, 1]
        arc_targets = self._buckets[bkt_idx][:, bkt_batch, 2]
        rel_targets = self._buckets[bkt_idx][:, bkt_batch, 3]
        yield word_inputs, tag_inputs, arc_targets, rel_targets
|
def _offset_subplot_ids(fig, offsets):
    """Apply offsets to the subplot id numbers in a figure.

    Note: This function mutates the input figure dict

    Note: This function assumes that the normalize_subplot_ids function has
    already been run on the figure, so that all layout subplot properties in
    use are explicitly present in the figure's layout.

    Parameters
    ----------
    fig : dict
        A plotly figure dict
    offsets : dict
        A dict from subplot types to the offset to be applied for each
        subplot type.  This dict matches the form of the dict returned by
        get_max_subplot_ids
    """
    # Offset traces.  Default to an empty list so figures without a
    # 'data' key don't raise a TypeError when iterated.
    for trace in fig.get('data', []):
        trace_type = trace.get('type', 'scatter')
        subplot_types = _trace_to_subplot.get(trace_type, [])
        for subplot_type in subplot_types:
            subplot_prop_name = _get_subplot_prop_name(subplot_type)
            # Compute subplot value prefix
            subplot_val_prefix = _get_subplot_val_prefix(subplot_type)
            subplot_val = trace.get(subplot_prop_name, subplot_val_prefix)
            subplot_number = _get_subplot_number(subplot_val)
            offset_subplot_number = (subplot_number + offsets.get(subplot_type, 0))
            if offset_subplot_number > 1:
                trace[subplot_prop_name] = (subplot_val_prefix + str(offset_subplot_number))
            else:
                # Subplot 1 is spelled without a numeric suffix.
                trace[subplot_prop_name] = subplot_val_prefix
    # layout subplots: rename e.g. 'xaxis2' -> 'xaxis2+offset'.
    layout = fig.setdefault('layout', {})
    new_subplots = {}
    for subplot_type in offsets:
        offset = offsets[subplot_type]
        if offset < 1:
            continue
        for layout_prop in list(layout.keys()):
            if layout_prop.startswith(subplot_type):
                subplot_number = _get_subplot_number(layout_prop)
                new_subplot_number = subplot_number + offset
                new_layout_prop = subplot_type + str(new_subplot_number)
                new_subplots[new_layout_prop] = layout.pop(layout_prop)
    layout.update(new_subplots)
    # xaxis/yaxis anchors must be shifted to keep pointing at the same axes.
    x_offset = offsets.get('xaxis', 0)
    y_offset = offsets.get('yaxis', 0)
    for layout_prop in list(layout.keys()):
        if layout_prop.startswith('xaxis'):
            xaxis = layout[layout_prop]
            anchor = xaxis.get('anchor', 'y')
            anchor_number = _get_subplot_number(anchor) + y_offset
            if anchor_number > 1:
                xaxis['anchor'] = 'y' + str(anchor_number)
            else:
                xaxis['anchor'] = 'y'
        elif layout_prop.startswith('yaxis'):
            yaxis = layout[layout_prop]
            anchor = yaxis.get('anchor', 'x')
            anchor_number = _get_subplot_number(anchor) + x_offset
            if anchor_number > 1:
                yaxis['anchor'] = 'x' + str(anchor_number)
            else:
                yaxis['anchor'] = 'x'
    # annotations/shapes/images keep axis references that must follow.
    for layout_prop in ['annotations', 'shapes', 'images']:
        for obj in layout.get(layout_prop, []):
            if x_offset:
                xref = obj.get('xref', 'x')
                if xref != 'paper':
                    xref_number = _get_subplot_number(xref)
                    obj['xref'] = 'x' + str(xref_number + x_offset)
            if y_offset:
                yref = obj.get('yref', 'y')
                if yref != 'paper':
                    yref_number = _get_subplot_number(yref)
                    obj['yref'] = 'y' + str(yref_number + y_offset)
|
def CredibleInterval(self, percentage=90):
    """Compute the central credible interval.

    If percentage=90, computes the 90% CI.

    Args:
        percentage: float between 0 and 100

    Returns:
        sequence of two floats, low and high
    """
    tail_prob = (1 - percentage / 100.0) / 2
    return self.Value(tail_prob), self.Value(1 - tail_prob)
|
def convert(gr, raw_node):
    """Convert raw node information to a Node or Leaf instance.

    This is passed to the parser driver, which calls it whenever a
    reduction of a grammar rule produces a new complete node, so the tree
    is built strictly bottom-up.
    """
    type, value, context, children = raw_node
    if not (children or type in gr.number2symbol):
        # A plain token becomes a leaf.
        return Leaf(type, value, context=context)
    # If there's exactly one child, return that child instead of
    # creating a new node.
    if len(children) == 1:
        return children[0]
    return Node(type, children, context=context)
|
def assign_method(stochastic, scale=None, verbose=-1):
    """Return a step method instance to handle a variable.

    If several methods have the same competence, one is picked arbitrarily
    (using set.pop()).

    :param stochastic: the stochastic variable needing a step method
    :param scale: optional proposal scale forwarded to the step method
    :param verbose: verbosity level forwarded to the step method
    """
    # Retrieve set of best candidates
    best_candidates = pick_best_methods(stochastic)
    # Randomly grab and appropriate method
    method = best_candidates.pop()
    failure_header = """Failed attempting to automatically assign step method class %s
to stochastic variable %s. Try setting %s's competence method to return 0
and manually assigning it when appropriate. See the user guide.

Error message: """ % (method.__name__, stochastic.__name__, method.__name__)
    try:
        if scale:
            out = method(stochastic, scale=scale, verbose=verbose)
        else:
            out = method(stochastic, verbose=verbose)
    except:
        # Prepend context to whatever the step method raised, then re-raise
        # with the original traceback preserved.
        a, b, c = sys.exc_info()
        try:
            args = list(b.args)
        except AttributeError:
            args = []
        args.append(failure_header)
        b.args = args
        six.reraise(a, b, c)
    return out
|
def main():
    """Build STRING protein-to-gene mapping files for several species.

    Zebrafish:
        1. Map ENSP to ZFIN Ids using Intermine
        2. Map deprecated ENSP IDs to ensembl genes by querying the ensembl
           database, then use intermine to resolve to gene IDs
    Mouse: Map deprecated ENSP IDs to ensembl genes by querying the ensembl
           database, then use intermine to resolve to MGI IDs
    Fly:   ENSP IDs appear as xrefs on translation IDs
    Worm:  Use UniProt Mapping file provided by String
    """
    parser = argparse.ArgumentParser(usage=__doc__)
    parser.add_argument('--config', '-c', required=True, help='JSON configuration file')
    parser.add_argument('--out', '-o', required=False, help='output directory', default="./")
    parser.add_argument('--use_cache', '-cached', action="store_true",
                        required=False, help='use cached files', default=False)
    args = parser.parse_args()
    # Hardcoded dir for raw files
    out_path = Path(args.out)
    raw_dir = out_path / "out"
    raw_dir.mkdir(parents=True, exist_ok=True)
    VERSION = 'v10.5'
    STRING_BASE = "http://string-db.org/download/" \
                  "protein.links.detailed.{}".format(VERSION)
    # Parse the configuration.  safe_load avoids arbitrary object
    # construction and is mandatory on PyYAML >= 6 (plain load() without a
    # Loader raises a TypeError there).
    with open(args.config, 'r') as config_file:
        config = yaml.safe_load(config_file)
    # All IDs that cannot be resolved for any species end up here.
    out_unmapped_file = out_path / "unmapped_ids.tsv"
    unmapped_file = out_unmapped_file.open("w")
    # Connect to ensembl
    connection = connect_to_database(host=config['database']['host'],
                                     username=config['database']['username'],
                                     port=config['database']['port'])
    cursor = connection.cursor()

    # ---- Process MGI (mouse) eqs ----
    taxon = config['taxa_specific']['mouse']['tax_id']
    dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz'.format(taxon, VERSION)
    mouse_map_file = out_path / config['taxa_specific']['mouse']['output_file']
    mouse_file = mouse_map_file.open('w')
    path = '{}/{}.protein.links.detailed.{}.txt.gz'.format(STRING_BASE, taxon, VERSION)
    if not args.use_cache:
        download_file(path, dump_file)
    ensembl = Ensembl("rdf_graph", True)
    p2gene_map = ensembl.fetch_protein_gene_map(taxon)
    fh = gzip.open(str(dump_file), 'rb')
    df = pd.read_csv(fh, sep=r'\s+')
    fh.close()
    proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
    logger.info("Processing {} proteins".format(len(proteins)))
    for protein in proteins:
        prot = protein.replace('{}.'.format(str(taxon)), '')
        try:
            # Fast path: current ensembl protein -> gene map.
            ens_gene = p2gene_map[prot]
            mouse_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
            continue
        except KeyError:
            pass
        # Deprecated id: resolve via the ensembl database, then MouseMine.
        ens_gene = get_deprecated_protein_gene_rel(
            cursor, prot, config['taxa_specific']['mouse']['ensembl'], config)
        intermine_resp = query_mousemine(config['taxa_specific']['mouse']['intermine'], ens_gene)
        if intermine_resp.is_successful:
            mouse_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
        else:
            unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
    mouse_file.close()

    # ---- Process Fly eqs ----
    taxon = config['taxa_specific']['fly']['tax_id']
    dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz'.format(taxon, VERSION)
    fly_map_file = out_path / config['taxa_specific']['fly']['output_file']
    fly_file = fly_map_file.open('w')
    path = '{}/{}.protein.links.detailed.{}.txt.gz'.format(STRING_BASE, taxon, VERSION)
    if not args.use_cache:
        download_file(path, dump_file)
    ensembl = Ensembl("rdf_graph", True)
    p2gene_map = ensembl.fetch_protein_gene_map(taxon)
    fh = gzip.open(str(dump_file), 'rb')
    df = pd.read_csv(fh, sep=r'\s+')
    fh.close()
    proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
    logger.info("Processing {} proteins".format(len(proteins)))
    for protein in proteins:
        prot = protein.replace('{}.'.format(str(taxon)), '')
        try:
            ens_gene = p2gene_map[prot]
            fly_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
            continue
        except KeyError:
            pass
        # For fly, ENSP ids appear as xrefs on translation ids.
        ens_gene = get_xref_protein_gene_rel(
            cursor, prot, config['taxa_specific']['fly']['ensembl'], config, taxon)
        if ens_gene is not None:
            fly_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
        else:
            unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
    fly_file.close()

    # ---- Process Worm eqs ----
    taxon = config['taxa_specific']['worm']['tax_id']
    dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz'.format(taxon, VERSION)
    uniprot_file = raw_dir / config['taxa_specific']['worm']['uniprot_file']
    worm_map_file = out_path / config['taxa_specific']['worm']['output_file']
    worm_file = worm_map_file.open('w')
    path = '{}/{}.protein.links.detailed.{}.txt.gz'.format(STRING_BASE, taxon, VERSION)
    if not args.use_cache:
        download_file(path, dump_file)
        download_file(config['taxa_specific']['worm']['uniprot_mappings'], uniprot_file)
    ensembl = Ensembl("rdf_graph", True)
    p2gene_map = ensembl.fetch_protein_gene_map(taxon)
    uni2gene_map = ensembl.fetch_uniprot_gene_map(taxon)
    # Build a STRING id -> UniProt accession map from the mapping file.
    fh = gzip.open(str(uniprot_file), 'rb')
    df = pd.read_csv(fh, sep=r'\s+')
    fh.close()
    string_uniprot_map = {}
    for index, row in df.iterrows():
        uniprot_ac = row['uniprot_ac|uniprot_id'].split('|')[0]
        string_uniprot_map[row['string_id']] = uniprot_ac
    fh = gzip.open(str(dump_file), 'rb')
    df = pd.read_csv(fh, sep=r'\s+')
    fh.close()
    proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
    logger.info("Processing {} proteins".format(len(proteins)))
    for protein in proteins:
        prot = protein.replace('{}.'.format(str(taxon)), '')
        try:
            ens_gene = p2gene_map[prot]
            worm_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
            continue
        except KeyError:
            pass
        try:
            # Fall back to the UniProt route: STRING id -> AC -> gene.
            uniprot_ac = string_uniprot_map[prot]
            ens_gene = uni2gene_map[uniprot_ac]
            worm_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
            continue
        except KeyError:
            pass
        unmapped_file.write("{}\t{}\t{}\n".format(prot, '', taxon))
    worm_file.close()

    # ---- Process ZFIN (zebrafish) eqs ----
    taxon = config['taxa_specific']['zebrafish']['tax_id']
    dump_file = raw_dir / '{}.protein.links.detailed.{}.txt.gz'.format(taxon, VERSION)
    zfin_map_file = out_path / config['taxa_specific']['zebrafish']['output_file']
    zfin_file = zfin_map_file.open('w')
    path = '{}/{}.protein.links.detailed.{}.txt.gz'.format(STRING_BASE, taxon, VERSION)
    if not args.use_cache:
        download_file(path, dump_file)
    ensembl = Ensembl("rdf_graph", True)
    p2gene_map = ensembl.fetch_protein_gene_map(taxon)
    # in 3.6 gzip accepts Paths
    fh = gzip.open(str(dump_file), 'rb')
    df = pd.read_csv(fh, sep=r'\s+')
    fh.close()
    proteins = pd.unique(df[['protein1', 'protein2']].values.ravel())
    logger.info("Processing {} proteins".format(len(proteins)))
    for protein in proteins:
        prot = protein.replace('{}.'.format(str(taxon)), '')
        try:
            ens_gene = p2gene_map[prot]
            zfin_file.write("{}\t{}\n".format(prot, "ENSEMBL:{}".format(ens_gene)))
            continue
        except KeyError:
            pass
        # 1. direct ZFIN lookup on the protein id
        intermine_resp = query_fishmine(config['taxa_specific']['zebrafish']['intermine'], prot)
        if intermine_resp.is_successful:
            zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
            continue
        # 2. deprecated protein id -> gene, then ZFIN lookup on the gene
        ens_gene = get_deprecated_protein_gene_rel(
            cursor, prot, config['taxa_specific']['zebrafish']['ensembl'], config)
        intermine_resp = query_fishmine(config['taxa_specific']['zebrafish']['intermine'], ens_gene)
        if intermine_resp.is_successful:
            zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
            continue
        # 3. last resort: the gene may be annotated as a pseudogene
        intermine_resp = query_fishmine(config['taxa_specific']['zebrafish']['intermine'],
                                        ens_gene, "Pseudogene")
        if intermine_resp.is_successful:
            zfin_file.write("{}\t{}\n".format(prot, intermine_resp.gene_id))
        else:
            unmapped_file.write("{}\t{}\t{}\n".format(prot, ens_gene, taxon))
    zfin_file.close()
    unmapped_file.close()
    connection.close()
    logger.info("ID Map Finished")
|
def search_expression_levels(self, rna_quantification_id="", names=None, threshold=0.0):
    """Return an iterator over the ExpressionLevel objects from the server.

    :param str rna_quantification_id: The ID of the
        :class:`ga4gh.protocol.RnaQuantification` of interest.
    :param list names: Optional list of expression-level names to filter on.
        (The original signature used a mutable ``[]`` default, which is shared
        across calls; ``None`` is treated the same as an empty list.)
    :param float threshold: Minimum expression of responses to return.
    """
    request = protocol.SearchExpressionLevelsRequest()
    request.rna_quantification_id = rna_quantification_id
    if names:
        request.names.extend(names)
    request.threshold = threshold
    request.page_size = pb.int(self._page_size)
    return self._run_search_request(
        request, "expressionlevels", protocol.SearchExpressionLevelsResponse)
|
def factory(name, desc, type, subtypes=None, required=True, default=None, ctor=None, hide=False):
    """Create a DocStringArg, recursively including child docstrings for
    non-JSON types.

    ``name``/``desc``/``type`` describe the argument.  ``subtypes`` lists the
    contained type(s) for container types (one item for lists, two for dicts).
    ``required``/``default`` describe optionality; ``default`` is ignored when
    ``required`` is True.  ``ctor``, when given, is the full dotted path
    (``module.submodule.Class.method``) to the constructor or factory the JSON
    object will be unmarshalled to; its docstring is parsed recursively.
    ``hide`` is a rendering hint to omit the argument from display.

    Returns a DocStringArg instance with recursively populated ``docstring``
    attributes for child arguments.
    """
    docstring = None
    if ctor:
        type_assert(ctor, str)
        # ctor is a dotted path; the last two components are the class and
        # the method/factory name, everything before is the module path.
        module_path, class_name, method_name = ctor.rsplit('.', 2)
        target_cls = getattr(importlib.import_module(module_path), class_name)
        bound_method = getattr(target_cls, method_name)
        docstring = DocString.from_ctor(bound_method)
    return DocStringArg(
        name,
        desc,
        type,
        subtypes,
        required,
        default,
        docstring=docstring,
        hide=hide,
    )
|
def send_exit_with_code(cls, sock, code):
    """Send an Exit chunk over *sock* whose payload is the encoded *code*."""
    payload = cls.encode_int(code)
    cls.send_exit(sock, payload=payload)
|
def array(a, dtype: type = None, **kwargs) -> np.ndarray:
    "Same as `np.array` but also handles generators. `kwargs` are passed to `np.array` with `dtype`."
    # BUG FIX: `collections.Sized` was a deprecated alias removed in Python
    # 3.10; the ABC lives in `collections.abc`.
    if not isinstance(a, collections.abc.Sized) and not getattr(a, '__array_interface__', False):
        # Generators and other unsized iterables must be materialized first.
        a = list(a)
    # On platforms where the default int is 32-bit (e.g. Windows), promote
    # plain Python ints to int64 so behavior matches 64-bit platforms.
    if np.int_ == np.int32 and dtype is None and is_listy(a) and len(a) and isinstance(a[0], int):
        dtype = np.int64
    return np.array(a, dtype=dtype, **kwargs)
|
def get_mappings_for_fit(self, dense=False):
    """Build the row-to-category mapping matrices needed for estimation.

    Parameters
    ----------
    dense : bool, optional.
        If True, return dense numpy arrays; otherwise return scipy sparse
        matrices.

    Returns
    -------
    OrderedDict
        Keys are ``["rows_to_obs", "rows_to_alts", "chosen_row_to_obs",
        "rows_to_nests"]``.  ``rows_to_obs`` maps long-format rows to unique
        observations (in order of appearance); ``rows_to_alts`` maps rows to
        the possible alternatives (in sorted order); ``chosen_row_to_obs``
        (if not None) maps rows holding chosen alternatives to their
        observations; ``rows_to_nests`` (if not None) maps rows to the nest
        containing the row's alternative.
    """
    return create_long_form_mappings(self.data,
                                     self.obs_id_col,
                                     self.alt_id_col,
                                     choice_col=self.choice_col,
                                     nest_spec=self.nest_spec,
                                     mix_id_col=self.mixing_id_col,
                                     dense=dense)
|
def setValue(self, value):
    """Set the value that will be used for this query instance.

    Text values are normalized through ``projex.text.decoded``; all other
    values are stored unchanged.

    :param value: <variant>
    """
    # BUG FIX: the bare name `unicode` raises NameError on Python 3; detect
    # the available text types instead of assuming a Python 2 runtime.
    try:
        text_types = (str, unicode)  # Python 2: both str and unicode are text
    except NameError:
        text_types = (str,)          # Python 3: only str
    self.__value = projex.text.decoded(value) if isinstance(value, text_types) else value
|
def build_query(self):
    '''Assemble a SPARQL UPDATE query from the three diff graphs, shaped as:

        PREFIX foo: <http://foo.com>
        PREFIX bar: <http://bar.com>
        DELETE {...}
        INSERT {...}
        WHERE {...}

    Args:
        None: uses variables from self
    Returns:
        (str) sparql update query as string
    '''
    # Collect namespaces so the query can use prefixed names.
    self._derive_namespaces()
    parts = []
    for prefix, uri in self.update_prefixes.items():
        parts.append("PREFIX %s: <%s>\n" % (prefix, str(uri)))
    # Removed triples become the DELETE section.
    parts.append('\nDELETE {\n%s}\n\n'
                 % self.diffs.removed.serialize(format='nt').decode('utf-8'))
    # Added triples become the INSERT section.
    parts.append('\nINSERT {\n%s}\n\n'
                 % self.diffs.added.serialize(format='nt').decode('utf-8'))
    # WHERE clause is intentionally empty (not yet implemented).
    parts.append('WHERE {}')
    return ''.join(parts)
|
def set_schema_to_public(self):
    """Instructs to stay in the common 'public' schema."""
    public_schema = get_public_schema_name()
    self.tenant = FakeTenant(schema_name=public_schema)
    self.schema_name = public_schema
    self.set_settings_schema(public_schema)
    # Force the search path to be (re)applied on the next cursor use.
    self.search_path_set = False
|
def _AnyMessageToJsonObject(self, message):
    """Converts Any message according to Proto3 JSON Specification."""
    if not message.ListFields():
        return {}
    # '@type' must be serialized first, hence OrderedDict instead of {}.
    js = OrderedDict()
    js['@type'] = message.type_url
    sub_message = _CreateMessageFromTypeUrl(message.type_url)
    sub_message.ParseFromString(message.value)
    descriptor = sub_message.DESCRIPTOR
    if _IsWrapperMessage(descriptor):
        js['value'] = self._WrapperMessageToJsonObject(sub_message)
    elif descriptor.full_name in _WKTJSONMETHODS:
        to_json = _WKTJSONMETHODS[descriptor.full_name][0]
        js['value'] = methodcaller(to_json, sub_message)(self)
    else:
        # Regular messages are expanded in place into `js`.
        return self._RegularMessageToJsonObject(sub_message, js)
    return js
|
def delete_event(self, id, **data):
    """DELETE /events/:id/

    Deletes an event if the delete is permitted. In order for a delete to be
    permitted, there must be no pending or completed orders. Returns a boolean
    indicating success or failure of the delete.
    """
    path = "/events/{0}/".format(id)
    return self.delete(path, data=data)
|
def delaunay_2d(self, tol=1e-05, alpha=0.0, offset=1.0, bound=False, inplace=False):
    """Apply a Delaunay 2D filter along the best-fitting plane.

    When ``inplace`` is True the result overwrites this mesh and nothing is
    returned; otherwise the triangulated mesh is returned.
    """
    triangulator = vtk.vtkDelaunay2D()
    triangulator.SetProjectionPlaneMode(vtk.VTK_BEST_FITTING_PLANE)
    triangulator.SetInputDataObject(self)
    triangulator.SetTolerance(tol)
    triangulator.SetAlpha(alpha)
    triangulator.SetOffset(offset)
    triangulator.SetBoundingTriangulation(bound)
    triangulator.Update()
    result = _get_output(triangulator)
    if inplace:
        self.overwrite(result)
        return None
    return result
|
def register_lookup(cls, lookup, lookup_name=None):
    """Register *lookup* on *cls* and return it.

    The lookup is stored under ``lookup_name`` (defaulting to the lookup's
    own ``lookup_name`` attribute) and any cached lookups are invalidated.
    """
    name = lookup.lookup_name if lookup_name is None else lookup_name
    # Give the class its own registry rather than mutating one inherited
    # from a parent class.
    if 'class_lookups' not in cls.__dict__:
        cls.class_lookups = {}
    cls.class_lookups[name] = lookup
    cls._clear_cached_lookups()
    return lookup
|
def DefaultSelector():
    """Return a new instance of the best selector class available.

    This function serves as a first call for DefaultSelector to
    detect if the select module is being monkey-patched incorrectly
    by eventlet, greenlet, and preserve proper behavior.
    """
    global _DEFAULT_SELECTOR
    # The platform probe runs once; the chosen class is cached in the
    # module-level global and only instantiated on subsequent calls.
    if _DEFAULT_SELECTOR is None:
        if platform.python_implementation() == 'Jython':  # Platform-specific: Jython
            _DEFAULT_SELECTOR = JythonSelectSelector
        elif _can_allocate('kqueue'):  # BSD / macOS
            _DEFAULT_SELECTOR = KqueueSelector
        elif _can_allocate('devpoll'):  # Solaris
            _DEFAULT_SELECTOR = DevpollSelector
        elif _can_allocate('epoll'):  # Linux
            _DEFAULT_SELECTOR = EpollSelector
        elif _can_allocate('poll'):  # most other POSIX systems
            _DEFAULT_SELECTOR = PollSelector
        elif hasattr(select, 'select'):  # last-resort portable fallback
            _DEFAULT_SELECTOR = SelectSelector
        else:  # Platform-specific: AppEngine
            raise RuntimeError('Platform does not have a selector.')
    return _DEFAULT_SELECTOR()
|
def _cosine ( a , b ) :
"""Return the len ( a & b ) / len ( a )"""
|
return 1. * len ( a & b ) / ( math . sqrt ( len ( a ) ) * math . sqrt ( len ( b ) ) )
|
def link_text(self):
    """Get a text representation of the links node.

    :return: a string of the form ``'IOC Links\\n<rel> @ <href>: <text>\\n...'``,
        or ``''`` when no links node exists.
    """
    s = ''
    links_node = self.metadata.find('links')
    if links_node is None:
        return s
    # BUG FIX: Element.getchildren() was deprecated and removed in
    # Python 3.9's ElementTree; iterate the element directly instead.
    links = list(links_node)
    if links is None:
        return s
    s += 'IOC Links\n'
    for link in links:
        rel = link.attrib.get('rel', 'No Rel')
        href = link.attrib.get('href')
        text = link.text
        lt = '{rel}{href}: {text}\n'.format(rel=rel, href=' @ {}'.format(href) if href else '', text=text)
        s += lt
    return s
|
def customer_lifetime_value(self, transaction_prediction_model, frequency, recency, T, monetary_value, time=12, discount_rate=0.01, freq="D"):
    """Return customer lifetime value.

    Computes the average lifetime value for a group of one or more customers.

    Parameters
    ----------
    transaction_prediction_model : model
        Model used to predict future transactions; the literature uses
        Pareto/NBD but e.g. Beta-Geo models also work.
    frequency : array_like
        Frequency vector of customers' purchases (``x`` in the literature).
    recency : array_like
        Recency vector of customers' purchases (``t_x`` in the literature).
    T : array_like
        Customers' age (time units since first purchase).
    monetary_value : array_like
        Monetary value vector of customers' purchases (``m`` in the literature).
    time : float, optional
        Expected customer lifetime in months. Default: 12.
    discount_rate : float, optional
        Monthly adjusted discount rate. Default: 0.01.
    freq : string, optional
        One of {"D", "H", "M", "W"}: the time unit T is measured in.

    Returns
    -------
    Series
        Indexed by customer id, valued with estimated lifetime values.
    """
    # Gamma-Gamma estimate of the expected average profit per transaction.
    expected_profit = self.conditional_expected_average_profit(frequency, monetary_value)
    return _customer_lifetime_value(
        transaction_prediction_model, frequency, recency, T,
        expected_profit, time, discount_rate, freq=freq)
|
def persist_perf(run, session, svg_path):
    """Persist a flamegraph SVG in the database.

    The flamegraph exists as an SVG image on disk until we store it as run
    metadata in the database.

    Args:
        run: The run we attach these perf measurements to.
        session: The db transaction we belong to.
        svg_path: The path to the SVG file we want to store.
    """
    from benchbuild.utils import schema as s
    with open(svg_path, 'r') as svg_file:
        flamegraph_svg = svg_file.read()
    metadata = s.Metadata(name="perf.flamegraph",
                          value=flamegraph_svg,
                          run_id=run.id)
    session.add(metadata)
|
def sample_forecast_max_hail(self, dist_model_name, condition_model_name, num_samples, condition_threshold=0.5, query=None):
    """Sample each forecast hail object into an empirical max-hail-size distribution.

    Hail sizes are sampled from each predicted gamma distribution; the total
    number of draws per realization equals the object's area, and the maximum
    of each area-sized draw is kept.

    Args:
        dist_model_name: Name of the distribution ML model being evaluated.
        condition_model_name: Name of the hail/no-hail model being evaluated.
        num_samples: Number of maximum hail samples to draw.
        condition_threshold: Threshold for drawing hail samples.
        query: Optional str selecting a subset of the data for evaluation.

    Returns:
        numpy array of maximum hail samples, one row per forecast object.
    """
    dist_fc = self.matched_forecasts["dist"][dist_model_name]
    cond_fc = self.matched_forecasts["condition"][condition_model_name]
    if query is not None:
        dist_fc = dist_fc.query(query).reset_index(drop=True)
        cond_fc = cond_fc.query(query).reset_index(drop=True)
    num_objects = dist_fc.shape[0]
    max_hail_samples = np.zeros((num_objects, num_samples))
    areas = dist_fc["Area"].values
    cond_col = self.forecast_bins["condition"][0]
    dist_cols = self.forecast_bins["dist"]
    for obj_idx in np.arange(num_objects):
        # Only sample objects the condition model deems likely to hail.
        if cond_fc.loc[obj_idx, cond_col] >= condition_threshold:
            gamma_params = dist_fc.loc[obj_idx, dist_cols].values
            draws = gamma.rvs(*gamma_params, size=(num_samples, areas[obj_idx]))
            max_hail_samples[obj_idx] = np.sort(draws.max(axis=1))
    return max_hail_samples
|
def parse_query_parms(method, uri, query_str):
    """Parse the specified query parms string and return a dictionary of query
    parameters. The key of each dict item is the query parameter name, and the
    value of each dict item is the query parameter value. If a query parameter
    shows up more than once, the resulting dict item value is a list of all
    those values.

    query_str is the query string from the URL, everything after the '?'. If
    it is empty or None, None is returned.

    If a query parameter is not of the format "name=value", an HTTPError 400,1
    is raised.
    """
    if not query_str:
        return None
    query_parms = {}
    for query_item in query_str.split('&'):
        # Example for these items: 'name=a%20b'
        if query_item == '':
            continue
        # BUG FIX: split on the first '=' only, so values may themselves
        # contain '=' (e.g. base64 padding). The previous split('=') rejected
        # such legal values with HTTP 400.
        items = query_item.split('=', 1)
        if len(items) != 2:
            raise BadRequestError(
                method, uri, reason=1,
                message="Invalid format for URI query parameter: {!r} "
                        "(valid format is: 'name=value').".format(query_item))
        name = unquote(items[0])
        value = unquote(items[1])
        if name in query_parms:
            # Promote a repeated parameter to a list of all its values.
            existing_value = query_parms[name]
            if not isinstance(existing_value, list):
                query_parms[name] = list()
                query_parms[name].append(existing_value)
            query_parms[name].append(value)
        else:
            query_parms[name] = value
    return query_parms
|
def from_data(data):
    """Create a chunk from raw RIFF bytes that include the 8-byte header.

    The header is little-endian: a 4-byte chunk id followed by a 4-byte
    unsigned payload length.
    """
    # BUG FIX: struct byte-order characters must come first in the format
    # string -- '4s<I' raises struct.error; '<4sI' is the intended layout.
    header, length = struct.unpack('<4sI', data[:8])
    # NOTE(review): `length` is parsed but not used to bound the payload;
    # presumably callers pass exactly one chunk. TODO confirm whether this
    # should be data[8:8 + length].
    payload = data[8:]
    return RiffDataChunk(header, payload)
|
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
    """Ensure id inputs (if entered) are of one type (string id or index).

    Input:
        - rid (list or None): if not None, a list of rids
        - ridx (list or None): if not None, a list of indexes
        - cid (list or None): if not None, a list of cids
        - cidx (list or None): if not None, a list of indexes
    Output:
        - a tuple of the ordered ridx and cidx
    """
    # Rows: reject mixed id/index input, convert ids, and order the indexes.
    row_type, row_ids = check_id_idx_exclusivity(rid, ridx)
    ordered_ridx = get_ordered_idx(
        row_type, check_and_convert_ids(row_type, row_ids, row_meta_df), row_meta_df)
    # Columns: same pipeline.
    col_type, col_ids = check_id_idx_exclusivity(cid, cidx)
    ordered_cidx = get_ordered_idx(
        col_type, check_and_convert_ids(col_type, col_ids, col_meta_df), col_meta_df)
    return (ordered_ridx, ordered_cidx)
|
def transform_data(self, data):
    """Apply pre-processing transformations to data, adding the result to each
    segment under 'trans_data'.

    Parameters
    ----------
    data : instance of Segments
        segments including 'data' (ChanTime)

    Returns
    -------
    instance of Segments
        same object with transformed data as 'trans_data' (ChanTime)
    """
    trans = self.trans
    # Read the currently-selected options once, outside the segment loop.
    differ = trans['diff'].get_value()
    bandpass = trans['bandpass'].get_value()
    notch1 = trans['notch1'].get_value()
    notch2 = trans['notch2'].get_value()
    for seg in data:
        dat = seg['data']
        if differ:
            dat = math(dat, operator=diff, axis='time')
        if bandpass != 'none':
            order = trans['bp']['order'][1].get_value()
            f1 = trans['bp']['f1'][1].get_value()
            f2 = trans['bp']['f2'][1].get_value()
            # Empty fields mean "no cutoff on that side".
            if f1 == '':
                f1 = None
            if f2 == '':
                f2 = None
            dat = filter_(dat, low_cut=f1, high_cut=f2, order=order, ftype=bandpass)
        if notch1 != 'none':
            # A notch is implemented as a high-pass above the band plus a
            # low-pass below it.
            order = trans['n1']['order'][1].get_value()
            cf = trans['n1']['cf'][1].get_value()
            hbw = trans['n1']['bw'][1].get_value() / 2.0
            lo_pass = cf - hbw
            hi_pass = cf + hbw
            dat = filter_(dat, low_cut=hi_pass, order=order, ftype=notch1)
            dat = filter_(dat, high_cut=lo_pass, order=order, ftype=notch1)
        if notch2 != 'none':
            order = trans['n2']['order'][1].get_value()
            cf = trans['n2']['cf'][1].get_value()
            hbw = trans['n2']['bw'][1].get_value() / 2.0
            lo_pass = cf - hbw
            hi_pass = cf + hbw
            # BUG FIX: this branch previously filtered with ftype=notch1,
            # applying the wrong filter type for the second notch.
            dat = filter_(dat, low_cut=hi_pass, order=order, ftype=notch2)
            dat = filter_(dat, high_cut=lo_pass, order=order, ftype=notch2)
        seg['trans_data'] = dat
    return data
|
def GetPresetsInformation(cls):
    """Retrieves the presets information.

    Returns:
        list[tuple]: containing:
            str: preset name
            str: comma separated parser names that are defined by the preset
    """
    # TODO: refactor to pass PresetDefinition.
    return [(preset_definition.name, ', '.join(preset_definition.parsers))
            for preset_definition in ParsersManager.GetPresets()]
|
def slamdunkUtrRatesPlot(self):
    """Generate the UTR rates plot."""
    conversion_classes = ['T>C', 'A>T', 'A>G', 'A>C', 'T>A', 'T>G',
                          'G>A', 'G>T', 'G>C', 'C>A', 'C>T', 'C>G']
    # Assign one plot colour per conversion class, preserving order.
    cats = OrderedDict()
    for idx, conversion in enumerate(conversion_classes):
        cats[conversion] = {'color': self.plot_cols[idx]}
    pconfig = {
        'id': 'slamdunk_utrratesplot',
        'title': 'Slamdunk: Overall conversion rates per UTR',
        'cpswitch': False,
        'cpswitch_c_active': False,
        'ylab': 'Number of conversions',
        'stacking': 'normal',
        'tt_decimals': 2,
        'tt_suffix': '%',
        'tt_percentages': False,
        'hide_zero_cats': False,
    }
    self.add_section(
        name='Conversion rates per UTR',
        anchor='slamdunk_utr_rates',
        description="""This plot shows the individual conversion rates for all UTRs
        (see the <a href="http://t-neumann.github.io/slamdunk/docs.html#utrrates" target="_blank">slamdunk docs</a>).""",
        plot=bargraph.plot(self.utrates_data, cats, pconfig))
|
def create_job(self):
    """Create a public Luna job.

    Returns:
        job_id (basestring): Luna job id

    Raises:
        ValueError: if Luna answers without a job id.
    """
    # NOTE(review): the original wrapped everything below in a `finally:`
    # whose trailing `return` silently swallowed unexpected exceptions from
    # pkg_resources; only the expected DistributionNotFound is handled now.
    try:
        my_user_agent = pkg_resources.require('netort')[0].version
    except pkg_resources.DistributionNotFound:
        my_user_agent = 'DistributionNotFound'
    headers = {
        "User-Agent": "Uploader/{uploader_ua}, {upward_ua}".format(
            upward_ua=self.meta.get('user_agent', ''),
            uploader_ua=my_user_agent)
    }
    req = requests.Request(
        'POST',
        "{api_address}{path}".format(api_address=self.api_address,
                                     path=self.create_job_path),
        headers=headers)
    req.data = {'test_start': self.job.test_start}
    prepared_req = req.prepare()
    logger.debug('Prepared create_job request:\n%s', pretty_print(prepared_req))
    response = send_chunk(self.session, prepared_req)
    logger.debug('Luna create job status: %s', response.status_code)
    logger.debug('Answ data: %s', response.content)
    job_id = response.content.decode('utf-8') if isinstance(response.content, bytes) else response.content
    if not job_id:
        self.failed.set()
        # BUG FIX: the original passed the format argument to ValueError
        # instead of interpolating it, producing a tuple-style message.
        raise ValueError('Luna returned answer without jobid: %s' % response.content)
    logger.info('Luna job created: %s', job_id)
    return job_id
|
def on_config_value_changed(self, config_m, prop_name, info):
    """Callback invoked when a config value has been changed.

    :param ConfigModel config_m: The config model that has been changed
    :param str prop_name: Should always be 'config'
    :param dict info: Information e.g. about the changed config key
    """
    changed_key = info['args'][1]
    # Only the ticker-enable flag requires re-checking the configuration.
    if changed_key in ("EXECUTION_TICKER_ENABLED",):
        self.check_configuration()
|
def polyline(self, arr):
    """Draw a segment between each consecutive pair of points in *arr*."""
    for start, end in zip(arr, arr[1:]):
        self.line(start[0], start[1], end[0], end[1])
|
def submit(self):
    """Take up to four pending parts off the queue and submit them to the
    download pool; return the futures for the submitted parts.
    """
    futures = []
    # Keep at most four parts in flight at once.
    while self.submitted < 4 and not self.done():
        next_part = self.parts.pop(0)
        future = self.pool.submit(
            _download_part, self.file_path, self.session, self.url,
            self.retry, self.timeout, *next_part)
        futures.append(future)
        self.submitted += 1
        self.total_submitted += 1
    return futures
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.