signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def profit_construct(self):
    """Break the total profit down into its components.

    Returns:
        dict -- keys ``total_buyandsell`` (profit net of commission and tax,
        rounded to 2 decimals), ``total_tax``, ``total_commission`` and
        ``total_profit``.
    """
    net_trading = self.profit_money - self.total_commission - self.total_tax
    return {
        'total_buyandsell': round(net_trading, 2),
        'total_tax': self.total_tax,
        'total_commission': self.total_commission,
        'total_profit': self.profit_money,
    }
def register_factory(self, specification, factory, scope=None):
    """Shortcut for creating and registering a
    :py:class:`wiring.providers.FactoryProvider`.
    """
    provider = FactoryProvider(factory, scope=scope)
    self.register_provider(specification, provider)
def remove(self, item):
    """Transactional implementation of :func:`List.remove(item)
    <hazelcast.proxy.list.List.remove>`.

    :param item: (object), the specified item to be removed.
    :return: (bool), ``true`` if the item is removed successfully, ``false`` otherwise.
    """
    check_not_none(item, "item can't be none")
    item_data = self._to_data(item)
    return self._encode_invoke(transactional_list_remove_codec, item=item_data)
def view_aterator(connection, callback, view, view_keys=dict(), args=tuple(),
                  kwargs=dict(), per_page=15, consume_errors=True):
    '''Asynchronous iterator for the view.  Downloads the view in pages of
    ``per_page`` rows and invokes ``callback(connection, row, *args, **kwargs)``
    for each row, which helps avoid transferring data in huge chunks.

    :param connection: object exposing an asynchronous ``query_view`` method.
    :param callback: callable invoked once per fetched row.
    :param view: the view to download; ``view.name`` is only read for logging.
    :param view_keys: extra query parameters merged into each page request.
    :param args: positional arguments forwarded to ``callback``.
    :param kwargs: keyword arguments forwarded to ``callback``.
    :param per_page: number of rows fetched per request.
    :param consume_errors: when False, callback exceptions are re-raised
        after being logged; when True they are swallowed.
    '''
    # NOTE(review): the bare ``yield`` of query_view/callback results suggests
    # a deferred-based (inlineCallbacks-style) coroutine -- confirm the
    # decorator at the definition site.  The mutable defaults (dict()/tuple())
    # are benign here: view_keys is copied below and none are mutated.
    skip = 0
    while True:
        # Re-copy the caller's keys each page so skip/limit never leak back.
        keys = dict(view_keys)
        keys.update(dict(skip=skip, limit=per_page))
        records = yield connection.query_view(view, **keys)
        log.debug('view_aterator', "Fetched %d records of the view: %s",
                  len(records), view.name)
        skip += len(records)
        for record in records:
            try:
                yield callback(connection, record, *args, **kwargs)
            except Exception as e:
                error.handle_exception(
                    'view_aterator', e,
                    "Callback %s failed its iteration on a row %r",
                    callback.__name__, record)
                if not consume_errors:
                    raise e
        # An empty page means the view is exhausted.
        if not records:
            break
def authenticate(self, user, password):
    """Authenticate *user* against *password*.

    :param user: mapping with at least ``'password_hash'`` and ``'uid'`` keys.
    :param password: clear-text password to verify.
    :raises AssertionError: when the password does not match the stored hash.
    """
    # Fix: a bare ``assert`` is stripped under ``python -O`` so the check
    # would silently disappear.  Raise the same exception type explicitly so
    # authentication always runs; callers catching AssertionError still work.
    if user['password_hash'] != '_'.join((password, 'hash')):
        raise AssertionError('password mismatch for user %s' % user.get('uid'))
    self.logger.debug('User %s has been successfully authenticated', user['uid'])
def requests(self, code=None, **kwargs):
    """Retrieve open requests, optionally limited to one service code.

    >>> Three('api.city.gov').requests()
    {'all': {'requests': 'data'}}
    >>> Three('api.city.gov').requests('123')
    {'123': {'requests': 'data'}}
    """
    if code:
        kwargs['service_code'] = code
    return self.get('requests', **kwargs)
def cumulative_value(self, slip, mmax, mag_value, bbar, dbar, beta):
    r'''Return the rate of events with M > mag_value.

    :param float slip: slip rate in mm/yr
    :param float mmax: maximum magnitude
    :param float mag_value: magnitude value
    :param float bbar: \bar{b} parameter (effectively = b * log(10.))
    :param float dbar: \bar{d} parameter
    :param float beta: beta value of formula defined in Eq. 20 of
        Anderson & Luco (1983)
    '''
    magnitude_gap = mmax - mag_value
    # Slip is converted from mm/yr to cm/yr before computing a_1.
    scale = self._get_a1_value(bbar, dbar, slip / 10., beta, mmax)
    # The boolean factor forces a zero rate whenever mag_value >= mmax.
    return scale * np.exp(bbar * magnitude_gap) * (magnitude_gap > 0.0)
def _is_finished_dumping_checkpoint ( directory ) :
"""Recent versions of RTA ( 1.10 or better ) , write the complete file .
This is the most straightforward source but as of 1.10 still does not
work correctly as the file will be created at the end of Read 1 even
if there are multiple reads .""" | check_file = os . path . join ( directory , "Basecalling_Netcopy_complete.txt" )
check_v1 , check_v2 = ( 1 , 10 )
if os . path . exists ( check_file ) :
with open ( check_file ) as in_handle :
line = in_handle . readline ( ) . strip ( )
if line :
version = line . split ( ) [ - 1 ]
v1 , v2 = [ float ( v ) for v in version . split ( "." ) [ : 2 ] ]
if ( ( v1 > check_v1 ) or ( v1 == check_v1 and v2 >= check_v2 ) ) :
return True |
def tco_return_handle(tokens):
    """Process tail-call-optimizable return statements."""
    internal_assert(len(tokens) == 2, "invalid tail-call-optimizable return statement tokens", tokens)
    func, call = tokens
    if call.startswith("()"):
        # call contains \n after the empty argument list
        return "return _coconut_tail_call(" + func + ")" + call[2:]
    return "return _coconut_tail_call(" + func + ", " + call[1:]
def getEmpTraitCorrCoef(self):
    """Return the empirical trait correlation matrix.

    Normalizes the empirical trait covariance by the outer product of the
    per-trait standard deviations.
    """
    covariance = self.getEmpTraitCovar()
    std_col = SP.sqrt(covariance.diagonal())[:, SP.newaxis]
    return covariance / std_col / std_col.T
def sec_to_public_pair(sec, generator=None, strict=True):
    """Convert a public key in SEC binary format to a public pair (x, y).

    :param sec: the SEC-encoded key bytes (leading type byte + coordinates).
    :param generator: optional curve generator used to derive the field byte
        length and, for compressed keys, to recover y from x.
    :param strict: when False, tolerate the hybrid prefixes 0x06/0x07 and any
        prefix on compressed keys.
    :raises EncodingError: when the encoding does not match any accepted form.
    """
    # Field byte length: derived from the curve modulus when a generator is
    # given, otherwise inferred from the key length (len(sec) - 1) -- which
    # only matches the compressed layout below.
    byte_count = (generator.p().bit_length() + 7) >> 3 if generator else (len(sec) - 1)
    x = from_bytes_32(sec[1:1 + byte_count])
    sec0 = sec[:1]
    if len(sec) == 1 + byte_count * 2:
        # Uncompressed form: prefix 0x04 (0x06/0x07 accepted when not strict).
        isok = sec0 == b'\4'
        if not strict:
            isok = isok or (sec0 in [b'\6', b'\7'])
        if isok:
            y = from_bytes_32(sec[1 + byte_count:1 + 2 * byte_count])
            return (x, y)
    elif len(sec) == 1 + byte_count:
        # Compressed form: 0x02 => even y, 0x03 => odd y.
        if not strict or (sec0 in (b'\2', b'\3')):
            is_y_odd = (sec0 != b'\2')
            # NOTE(review): this path dereferences ``generator`` -- a
            # compressed key with generator=None would raise AttributeError
            # rather than EncodingError; confirm callers always pass one.
            return generator.points_for_x(x)[is_y_odd]
    raise EncodingError("bad sec encoding for public key")
def filter_images_urls(image_urls, image_filter, common_image_filter=None):
    '''Filter a list of image URLs with regex-based filter rules.

    :param list(str) image_urls: image URL strings to filter
    :param image_filter: JSON-encoded filter -- a single pattern string or a
        list of pattern strings, applied after the common filters
    :param list(str) common_image_filter: optional shared base filters,
        applied to every URL before the custom ``image_filter`` patterns
    :return: the URLs that survived filtering, and the URLs filtered out
    :rtype: list(str), list(str)
    :raises TypeError: ``image_filter`` decodes to neither str nor list
    :raises ValueError: ``image_filter`` contains an empty value
    '''
    common_image_filter = common_image_filter or []
    # Validate the custom filter.  Fix: json.loads() lost its ``encoding``
    # keyword in Python 3.9 -- the input is already text, so just decode it.
    image_filter = json.loads(image_filter)
    if not isinstance(image_filter, (str, list)):
        raise TypeError('image_filter not str or list')
    if isinstance(image_filter, str):
        image_filter = [image_filter]
    if not all(image_filter):
        raise ValueError('image_filter 中存在空值:{}'.format(image_filter))
    # Fix: the original mutated a copy with list.remove() inside the filter
    # loops, so a URL matching both a common and a custom pattern was removed
    # twice (raising ValueError for unique URLs, or deleting the wrong
    # duplicate).  Classify each URL exactly once instead.
    kept = []
    removed = []
    for url in image_urls:
        # Common (shared) filters run first, then the article-specific ones.
        matched = any(re.search(pattern, url) for pattern in common_image_filter)
        if not matched:
            matched = any(re.search(pattern, url) for pattern in image_filter)
        if matched:
            removed.append(url)
        else:
            kept.append(url)
    return kept, removed
def build_acl_port(self, port, enabled=True):
    """Build the acl for L4 Ports.

    ``port`` may be a single port ("80") or a colon range ("20:21");
    returns None when no port is given.
    """
    if port is None:
        return None
    if ':' in port:
        port_range = port.replace(':', ' ')
        acl = "range %(range)s " % {'range': port_range}
    else:
        acl = "eq %(port)s " % {'port': port}
    if not enabled:
        acl += "inactive"
    return acl
def get(self, request, slug):
    """Basic functionality for GET requests to the view."""
    matching = self.generate_matching_datasets(slug)
    if matching is None:
        raise Http404("Datasets meeting these criteria do not exist.")
    context = {
        'datasets': matching,
        'num_datasets': matching.count(),
        'page_title': self.generate_page_title(slug),
    }
    context.update(self.generate_additional_context(matching))
    return render(request, self.template_path, context)
def _setup_exercise(game_interface: GameInterface, ex: Exercise, seed: int) -> Optional[Result]:
    """Set the game state.

    Only returns a Result if there was an error in ex.setup().
    """
    rng = random.Random(seed)
    try:
        game_state = ex.setup(rng)
    except Exception as e:
        return Result(ex, seed, FailDueToExerciseException(e, traceback.format_exc()))
    game_interface.set_game_state(game_state)
def to_cmd_args(mapping):  # type: (dict) -> list
    """Transform a dictionary into a flat list of cmd arguments.

    Example:
        >>> args = mapping.to_cmd_args({'model_dir': '/opt/ml/model', 'batch_size': 25})
        >>> print(args)
        ['--batch_size', '25', '--model_dir', '/opt/ml/model']

    Args:
        mapping (dict[str, object]): A Python mapping.

    Returns:
        (list): List of cmd arguments.
    """
    def _name(key):
        text = _decode(key)
        if not text:
            return u''
        # Long options get '--', single characters get '-'.
        return (u'--%s' if len(text) > 1 else u'-%s') % text

    def _value(raw):
        if hasattr(raw, 'items'):
            # Nested mappings become a comma-separated k=v list.
            return ','.join('%s=%s' % (k, v) for k, v in sorted(raw.items()))
        return _decode(raw)

    flat = []
    for key in sorted(mapping.keys()):
        flat.append(_name(key))
        flat.append(_value(mapping[key]))
    return flat
def json(self):
    """Get the result of simplejson.loads if possible."""
    content_type = self.environ.get('CONTENT_TYPE', '')
    if 'json' not in content_type:
        raise BadRequest('Not a JSON request')
    try:
        return loads(self.data)
    except Exception:
        raise BadRequest('Unable to read JSON request')
def agents():
    """List the currently active LiveSync agents on stdout (Python 2)."""
    print 'The following LiveSync agents are active:'
    agent_list = LiveSyncAgent.find().order_by(LiveSyncAgent.backend_name, db.func.lower(LiveSyncAgent.name)).all()
    table_data = [['ID', 'Name', 'Backend', 'Initial Export', 'Queue']]
    for agent in agent_list:
        # Colour-coded initial-export status for the terminal.
        initial = (cformat('%{green!}done%{reset}') if agent.initial_data_exported else cformat('%{yellow!}pending%{reset}'))
        if agent.backend is None:
            backend_title = cformat('%{red!}invalid backend ({})%{reset}').format(agent.backend_name)
        else:
            backend_title = agent.backend.title
        # Queue column counts this agent's still-unprocessed entries.
        table_data.append([unicode(agent.id), agent.name, backend_title, initial, unicode(agent.queue.filter_by(processed=False).count())])
    table = AsciiTable(table_data)
    # Right-align the numeric queue column.
    table.justify_columns[4] = 'right'
    print table.table
    # Remind the operator when any agent still needs its initial export.
    if not all(a.initial_data_exported for a in agent_list):
        print
        print "You need to perform the initial data export for some agents."
        print cformat("To do so, run " "%{yellow!}indico livesync initial_export %{reset}%{yellow}<agent_id>%{reset} for those agents.")
def update(self, id, *args, **kwargs):
    """Update a deal source.

    Updates source information.  If the specified source does not exist, the
    request will return an error.  If you want to update a source, you
    **must** make sure the source's name is unique.

    :calls: ``put /deal_sources/{id}``
    :param int id: Unique identifier of a DealSource.
    :param tuple *args: (optional) Single object representing DealSource
        resource which attributes should be updated.
    :param dict **kwargs: (optional) DealSource attributes to update.
    :return: Dictionary that supports attribute-style access and represents
        the updated DealSource resource.
    :rtype: dict
    """
    if not args and not kwargs:
        raise Exception('attributes for DealSource are missing')
    attributes = args[0] if args else kwargs
    # Fix: dict.iteritems() does not exist on Python 3; .items() behaves the
    # same here and works on both interpreters.
    attributes = dict((k, v) for k, v in attributes.items() if k in self.OPTS_KEYS_TO_PERSIST)
    _, _, deal_source = self.http_client.put("/deal_sources/{id}".format(id=id), body=attributes)
    return deal_source
def fcd2dri(inpFCD, outSTRM, ignored):
    """Reformat the contents of the given fcd-output file into a .dri file,
    readable by PHEM.  The fcd-output "fcd" must be a valid file name of an
    fcd-output.

    The following may be a matter of changes:
    - the engine torque is not given
    """
    print("v1\n<t>,<v>,<grad>\n[s],[km/h],[%]", file=outSTRM)
    for step in inpFCD:
        if not step.vehicle:
            continue
        for veh in step.vehicle:
            # Slope (radians) converted to a percent gradient.
            gradient = math.sin(float(veh.slope)) * 100.
            print("%s,%.3f,%s" % (sumolib._intTime(step.time), float(veh.speed) * 3.6, gradient), file=outSTRM)
def setReferenceVoltage(self, caldb, calv):
    """Set the calibration reference point that maps outgoing voltage to
    intensity, used to calculate the proper output amplitude of components.

    :param caldb: calibration intensity in dB SPL
    :type caldb: float
    :param calv: calibration voltage that was used to record the intensity provided
    :type calv: float
    """
    self.caldb, self.calv = caldb, calv
def add_watcher(self, fd, callback):
    """Start watching a non-blocking fd for readable data."""
    descriptor = fd if isinstance(fd, int) else fd.fileno()
    self.callbacks[descriptor] = callback
    self.epoll.register(descriptor, EPOLLIN)
def python(source):
    r"""Execute *source* and return an object whose attributes are the
    resulting definitions.

    >>> python('def add(a, b): return a + b').add(40, 2)
    42
    """
    holder = type('', (object,), {})()
    # Use the instance __dict__ as both globals and locals so definitions
    # land directly on the returned object.
    _exec(source, holder.__dict__, holder.__dict__)
    return holder
def match_set(self, tokens, item):
    """Matches a set."""
    (match,) = tokens
    self.add_check("_coconut.isinstance(" + item + ", _coconut.abc.Set)")
    self.add_check("_coconut.len(" + item + ") == " + str(len(match)))
    for element in match:
        self.add_check(element + " in " + item)
def print_hits(results):
    """Simple utility function to print results of a search query."""
    print_search_stats(results)
    for hit in results['hits']['hits']:
        source = hit['_source']
        # get created date for a repo and fall back to authored_date for a commit
        created_at = parse_date(source.get('created_at', source['authored_date']))
        first_line = source['description'].split('\n')[0]
        print('/%s/%s/%s (%s): %s' % (hit['_index'], hit['_type'], hit['_id'],
                                      created_at.strftime('%Y-%m-%d'), first_line))
    print('=' * 80)
    print()
def profiling_request_formatter(view, context, model, name):
    """Wrap HTTP method value in a bs3 label."""
    document = model[name]
    pieces = [
        '<p class="profiling-request">',
        '<a href="{}">'.format(document.get_admin_url(_external=True)),
        http_method_formatter(view, context, document, 'method'),
        ' ',
        document.path,
        '</a>',
        '</p>',
    ]
    return Markup(''.join(pieces))
def configure():
    """Load logging configuration from our own defaults."""
    levels = {
        5: logging.NOTSET,
        4: logging.DEBUG,
        3: logging.INFO,
        2: logging.WARNING,
        1: logging.ERROR,
        0: logging.CRITICAL,
    }
    logging.captureWarnings(True)
    root = logging.getLogger()
    # Debug mode gets a detailed format; otherwise just the message.
    if settings.CFG["debug"]:
        formatter = logging.Formatter('%(name)s (%(filename)s:%(lineno)s) [%(levelname)s] %(message)s')
    else:
        formatter = logging.Formatter('%(message)s')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    root.addHandler(handler)
    root.setLevel(levels[int(settings.CFG["verbosity"])])
    configure_plumbum_log()
    configure_migrate_log()
    configure_parse_log()
def clean_readme(fname):
    """Cleanup README.rst for proper PyPI formatting.

    Drops Sphinx-only directives and rewrites ``:role:`target <ref>``` cross
    references into plain literals.
    """
    skip_prefixes = ('.. currentmodule', '.. toctree')
    cleaned = []
    with codecs.open(fname, 'r', 'utf-8') as handle:
        for line in handle:
            if line.startswith(skip_prefixes):
                continue
            cleaned.append(re.sub(r':\w+:`([^`]+?)( <[^<>]+>)?`', r'``\1``', line))
    return ''.join(cleaned)
def get_task_output_description(task_output):
    '''Return a task's output as a short human-readable string.'''
    # Order matters: the remote/S3 targets may also be FileSystemTargets.
    if isinstance(task_output, RemoteTarget):
        return "[SSH] {0}:{1}".format(task_output._fs.remote_context.host, task_output.path)
    if isinstance(task_output, S3Target):
        return "[S3] {0}".format(task_output.path)
    if isinstance(task_output, FileSystemTarget):
        return "[FileSystem] {0}".format(task_output.path)
    if isinstance(task_output, PostgresTarget):
        return "[DB] {0}:{1}".format(task_output.host, task_output.table)
    return "to be determined"
def indx(self, norm):
    '''Return the property index bytes for the given *normalized* value.

    Subclasses must override this; the base implementation always raises.
    '''
    cls_name = type(self).__name__
    raise s_exc.NoSuchImpl(name='%s.indx' % cls_name)
def fresh_transaction(self, name=None):
    """On entrance to this context manager, hold an exclusive lock and
    create a fresh transaction for redshift, then commit and begin a new
    one before releasing the lock on exit.

    See drop_relation in RedshiftAdapter for more information.

    :param Optional[str] name: The name of the connection to use, or None
        to use the default.
    """
    # NOTE(review): ``name`` is accepted but unused here --
    # get_thread_connection() selects the connection; confirm that is intended.
    with drop_lock:
        connection = self.get_thread_connection()
        # Close out any transaction that is already open before yielding.
        if connection.transaction_open:
            self.commit()
        self.begin()
        yield
        # Commit the caller's work, then leave a fresh transaction open
        # (still under the lock) before it is released.
        self.commit()
        self.begin()
def get_bb_intersections(recording):
    """Get all intersections of the bounding boxes of strokes.

    Parameters:
        recording : list of lists of integers

    Returns:
        A symmetrical boolean matrix indicating whether the (grown) bounding
        boxes of strokes i and j intersect.
    """
    count = len(recording)
    intersections = numpy.zeros((count, count), dtype=bool)
    # Grow each box once up front; the diagonal stays False.
    boxes = [geometry.get_bounding_box(stroke).grow(0.2) for stroke in recording]
    for i in range(count):
        for j in range(i + 1, count):
            hit = geometry.do_bb_intersect(boxes[i], boxes[j])
            intersections[i][j] = hit
            intersections[j][i] = hit
    return intersections
def get_compositions_by_search(self, composition_query, composition_search):
    """Gets the search results matching the given search query using the given search.

    arg:    composition_query (osid.repository.CompositionQuery): the
            composition query
    arg:    composition_search (osid.repository.CompositionSearch): the
            composition search
    return: (osid.repository.CompositionSearchResults) - the
            composition search results
    raise:  NullArgument - ``composition_query`` or
            ``composition_search`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``composition_query`` or
            ``composition_search`` is not of this service
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceSearchSession.get_resources_by_search_template
    # Copied from osid.resource.ResourceQuerySession.get_resources_by_query_template
    and_list = list()
    or_list = list()
    # Explicit query terms must all match (AND); keyword terms are OR'd.
    for term in composition_query._query_terms:
        and_list.append({term: composition_query._query_terms[term]})
    for term in composition_query._keyword_terms:
        or_list.append({term: composition_query._keyword_terms[term]})
    # Restrict to the search's explicit id list, when given.
    if composition_search._id_list is not None:
        identifiers = [ObjectId(i.identifier) for i in composition_search._id_list]
        and_list.append({'_id': {'$in': identifiers}})
    if or_list:
        and_list.append({'$or': or_list})
    # Apply the session's view filter (e.g. federated/isolated view).
    view_filter = self._view_filter()
    if view_filter:
        and_list.append(view_filter)
    if and_list:
        query_terms = {'$and': and_list}
    # NOTE(review): when and_list is empty, query_terms is never bound and
    # find() below raises UnboundLocalError -- confirm an empty query cannot
    # reach this point.
    collection = JSONClientValidated('repository', collection='Composition', runtime=self._runtime)
    # Honour start/end slicing of the search results, when both are provided.
    if composition_search.start is not None and composition_search.end is not None:
        result = collection.find(query_terms)[composition_search.start:composition_search.end]
    else:
        result = collection.find(query_terms)
    return searches.CompositionSearchResults(result, dict(composition_query._query_terms), runtime=self._runtime)
def get_network(self):
    """Identify the connected network.

    Returns a dictionary with keys chain_id, core_symbol and prefix.
    """
    chain_id = self.get_chain_properties()["chain_id"]
    for network in known_chains.values():
        if network["chain_id"] == chain_id:
            return network
    raise Exception("Connecting to unknown network!")
def fill_missing(self):
    """We do not want TLSServerKeyExchange.build() to overload and recompute
    things every time it is called. This method can be called specifically
    to have things filled in a smart fashion.

    XXX We should account for the point_format (before 'point' filling).
    """
    s = self.tls_session
    # Default to the 'named_curve' curve type when unset.
    if self.curve_type is None:
        self.curve_type = _tls_ec_curve_types["named_curve"]
    if self.named_curve is None:
        # No curve requested: default to secp256r1, generate our ephemeral
        # key on it, and record the matching named-curve id (0 if unknown).
        curve = ec.SECP256R1()
        s.server_kx_privkey = ec.generate_private_key(curve, default_backend())
        self.named_curve = next((cid for cid, name in six.iteritems(_tls_named_curves)  # noqa: E501
                                 if name == curve.name), 0)
    else:
        curve_name = _tls_named_curves.get(self.named_curve)
        if curve_name is None:
            # this fallback is arguable
            curve = ec.SECP256R1()
        else:
            curve_cls = ec._CURVE_TYPES.get(curve_name)
            if curve_cls is None:
                # this fallback is arguable
                curve = ec.SECP256R1()
            else:
                curve = curve_cls()
        s.server_kx_privkey = ec.generate_private_key(curve, default_backend())
    if self.point is None:
        pubkey = s.server_kx_privkey.public_key()
        try:
            # cryptography >= 2.5
            self.point = pubkey.public_bytes(serialization.Encoding.X962, serialization.PublicFormat.UncompressedPoint)
        except TypeError:
            # older versions
            # NOTE(review): this branch assigns ``self.key_exchange`` while
            # the modern branch assigns ``self.point`` -- confirm the
            # asymmetry is intentional (pointlen below still sees point=None
            # on this path).
            self.key_exchange = pubkey.public_numbers().encode_point()
    # else, we assume that the user wrote the server_kx_privkey by himself
    if self.pointlen is None:
        self.pointlen = len(self.point)
    if not s.client_kx_ecdh_params:
        s.client_kx_ecdh_params = curve
def p_if_statement_delay(self, p):
    # NOTE: in PLY the docstring below IS the grammar production for this
    # rule -- it must not be edited without changing the grammar itself.
    'if_statement : delays IF LPAREN cond RPAREN true_statement ELSE else_statement'
    # p[4]=cond, p[6]=true_statement, p[8]=else_statement; the line number
    # is taken from the IF token (index 2).
    p[0] = IfStatement(p[4], p[6], p[8], lineno=p.lineno(2))
    p.set_lineno(0, p.lineno(2))
def create_account_user(self, account_id, body, **kwargs):  # noqa: E501
    """Create a new user.  # noqa: E501

    An endpoint for creating or inviting a new user to the account.  In case
    of invitation, the email address is used only; other attributes are set
    in the 2nd step.

    This method makes a synchronous HTTP request by default.  To make an
    asynchronous HTTP request, pass asynchronous=True:
        >>> thread = api.create_account_user(account_id, body, asynchronous=True)
        >>> result = thread.get()

    :param asynchronous bool
    :param str account_id: Account ID. (required)
    :param UserInfoReq body: A user object with attributes. (required)
    :param str action: Create or invite user.
    :return: UserInfoResp, or the request thread when called asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        # Asynchronous call returns the request thread directly.
        return self.create_account_user_with_http_info(account_id, body, **kwargs)  # noqa: E501
    (data) = self.create_account_user_with_http_info(account_id, body, **kwargs)  # noqa: E501
    return data
def filterVerticalLines(arr, min_line_length=4):
    """Remove vertical lines from a boolean array, in place, whenever a
    column-wise run of True cells has length >= min_line_length.

    :param arr: 2d boolean array (modified in place; nothing is returned)
    :param min_line_length: minimum run length that triggers removal
    """
    n_rows = arr.shape[0]
    n_cols = arr.shape[1]
    # Fix: the original probed arr[i + d, j] without a bounds check, raising
    # IndexError whenever a run approached the bottom edge.  Scanning each
    # run with an explicit bound handles edge runs correctly.
    for col in range(n_cols):
        row = 0
        while row < n_rows:
            if arr[row, col]:
                # Measure the vertical run of True cells starting here.
                end = row
                while end < n_rows and arr[end, col]:
                    end += 1
                if end - row >= min_line_length:
                    arr[row:end, col] = 0
                row = end
            else:
                row += 1
def findfile(self, old, new):
    """Return the name of the file to be patched, or None."""
    for candidate in (old, new):
        if exists(candidate):
            return candidate
    # [w] Google Code generates broken patches with its online editor
    debug("broken patch from Google Code, stripping prefixes..")
    if old.startswith(b'a/') and new.startswith(b'b/'):
        old, new = old[2:], new[2:]
        debug("   %s" % old)
        debug("   %s" % new)
        for candidate in (old, new):
            if exists(candidate):
                return candidate
    return None
def change_access_key(self):
    """Change the access key of your account."""
    endpoint = '/rest/v1/users/{}/accesskey/change'.format(self.client.sauce_username)
    return self.client.request('POST', endpoint)
def DeregisterPathSpec(cls, path_spec_type):
    """Deregisters a path specification.

    Args:
        path_spec_type (type): path specification type.

    Raises:
        KeyError: if path specification is not registered.
    """
    type_indicator = path_spec_type.TYPE_INDICATOR
    if type_indicator not in cls._path_spec_types:
        raise KeyError('Path specification type: {0:s} not set.'.format(type_indicator))
    del cls._path_spec_types[type_indicator]
    # Drop the system-level registration too, when present.
    cls._system_level_type_indicators.pop(type_indicator, None)
def main(args):
    """Main function which runs master.

    Dispatches on ``args.command`` to the matching EvaluationMaster action.
    """
    if args.blacklisted_submissions:
        logging.warning('BLACKLISTED SUBMISSIONS: %s', args.blacklisted_submissions)
    # The limited dataset keeps debugging rounds fast: 3 batches * 10 images.
    if args.limited_dataset:
        logging.info('Using limited dataset: 3 batches * 10 images')
        max_dataset_num_images = 30
        batch_size = 10
    else:
        logging.info('Using full dataset. Batch size: %d', DEFAULT_BATCH_SIZE)
        max_dataset_num_images = None  # no cap: use the whole dataset
        batch_size = DEFAULT_BATCH_SIZE
    random.seed()
    print('\nRound: {0}\n'.format(args.round_name))
    eval_master = EvaluationMaster(
        storage_client=eval_lib.CompetitionStorageClient(args.project_id, args.storage_bucket),
        datastore_client=eval_lib.CompetitionDatastoreClient(args.project_id, args.round_name),
        round_name=args.round_name,
        dataset_name=args.dataset_name,
        blacklisted_submissions=args.blacklisted_submissions,
        results_dir=args.results_dir,
        num_defense_shards=args.num_defense_shards,
        verbose=args.verbose,
        batch_size=batch_size,
        max_dataset_num_images=max_dataset_num_images)
    # Command dispatch; unknown commands print usage instead of raising.
    if args.command == 'attack':
        eval_master.prepare_attacks()
    elif args.command == 'defense':
        eval_master.prepare_defenses()
    elif args.command == 'cleanup_defenses':
        eval_master.cleanup_defenses()
    elif args.command == 'results':
        eval_master.compute_results()
    elif args.command == 'status':
        eval_master.show_status()
    elif args.command == 'cleanup_datastore':
        eval_master.cleanup_datastore()
    elif args.command == 'cleanup_failed_attacks':
        eval_master.cleanup_failed_attacks()
    elif args.command == 'cleanup_attacks_with_zero_images':
        eval_master.cleanup_attacks_with_zero_images()
    else:
        print('Invalid command: ', args.command)
        print('')
        print(USAGE)
def summarize(logger):
    """Create a short summary of the actions recorded by the given Logger.

    :type logger: Logger
    :param logger: The logger that recorded what happened in the queue.
    :rtype: string
    :return: A string summarizing the status of every performed task.
    """
    lines = []
    for log in logger.get_logs():
        # Error text when the task failed, otherwise a plain 'ok'.
        status = log.has_error() and log.get_error(False) or 'ok'
        lines.append(log.get_name() + ': ' + status)
    return '\n'.join(lines)
def random_name(num_surnames=2):
    """Return a random person name.

    Arguments:
        num_surnames -- number of surnames
    """
    def _pick(pool):
        return pool[random.randint(0, len(pool) - 1)]

    parts = []
    # Optional prefix
    if random.random() < _PROB_PREF:
        parts.append(_pick(_prefixes))
    # Forename
    parts.append(_pick(_forenames))
    # Surnames
    for _ in range(num_surnames):
        parts.append(_pick(_surnames))
    # Optional suffix
    if random.random() < _PROB_SUFF:
        parts.append(_pick(_suffixes))
    return " ".join(parts)
def get_requirement_files(args=None):
    """Get the "best" requirements file(s) we can find.

    An explicit ``args.input_filename`` wins; otherwise the configured
    source globs are expanded.
    """
    if args and args.input_filename:
        return [args.input_filename]
    found = []
    for pattern in settings.REQUIREMENTS_SOURCE_GLOBS:
        found.extend(glob.glob(pattern))
    return found
def make_m_psd(self, original_nu, feed_dictionary):
    """Run binary search to find a value for nu that makes M PSD.

    Args:
        original_nu: starting value of nu to do binary search on
        feed_dictionary: dictionary of updated lambda variables to feed into M

    Returns:
        new_nu: new value of nu
    """
    feed_dict = feed_dictionary.copy()
    _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
    lower_nu = original_nu
    upper_nu = original_nu
    num_iter = 0
    # Phase 1: grow upper_nu geometrically until M's smallest eigenvalue
    # clears the tolerance (M is PSD), spending at most half the budget.
    while min_eig_val_m - TOL < 0 and num_iter < (MAX_BINARY_SEARCH_ITER / 2):
        num_iter += 1
        upper_nu *= NU_UPDATE_CONSTANT
        feed_dict.update({self.nu: upper_nu})
        _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
    final_nu = upper_nu
    # Phase 2: binary search between lower_nu and upper_nu for the smallest
    # nu that keeps M PSD; final_nu tracks the last PSD upper bound seen.
    while lower_nu <= upper_nu and num_iter < MAX_BINARY_SEARCH_ITER:
        num_iter += 1
        mid_nu = (lower_nu + upper_nu) / 2
        feed_dict.update({self.nu: mid_nu})
        _, min_eig_val_m = self.get_lanczos_eig(compute_m=True, feed_dict=feed_dict)
        if min_eig_val_m - TOL < 0:
            lower_nu = mid_nu
        else:
            upper_nu = mid_nu
        final_nu = upper_nu
    # NOTE(review): with floats, ``lower_nu <= upper_nu`` stays true until
    # the bounds meet exactly, so phase 2 normally terminates only via the
    # iteration cap -- confirm that is intended.
    return final_nu
def scan_dir(dirname, tags=None, md5_hash=False):
    '''Scan a directory tree and return a dictionary with files and key DICOM tags.

    The return value maps each DICOM file's absolute filename to a dictionary
    of tag/value pairs.

    :param dirname: root of the directory tree to scan
    :param tags: list of DICOM tags (tuples of hex numbers) to obtain for
        each file.  If not given, the default list is:
        (0x0008, 0x0021)  Series date
        (0x0008, 0x0031)  Series time
        (0x0008, 0x103E)  Series description
        (0x0008, 0x0080)  Institution name
        (0x0010, 0x0020)  Patient ID
        (0x0028, 0x0010)  Image rows
        (0x0028, 0x0011)  Image columns
    :param md5_hash: if ``True``, also record each file's MD5 hash under the
        ``'md5'`` key -- useful for detecting duplicate files
    '''
    # Fix: compare against the None sentinel with ``is``, not ``==`` (which
    # could invoke arbitrary __eq__ logic on a caller-supplied object).
    if tags is None:
        tags = [
            (0x0008, 0x0021),
            (0x0008, 0x0031),
            (0x0008, 0x103E),
            (0x0008, 0x0080),
            (0x0010, 0x0020),
            (0x0028, 0x0010),
            (0x0028, 0x0011),
        ]
    return_dict = {}
    for root, dirs, files in os.walk(dirname):
        for filename in files:
            fullname = os.path.join(root, filename)
            if is_dicom(fullname):
                return_dict[fullname] = info_for_tags(fullname, tags)
                if md5_hash:
                    return_dict[fullname]['md5'] = nl.hash(fullname)
    return return_dict
def RunOnce(self, names=None, token=None):
    """Tries to lock and run cron jobs.

    Args:
        names: List of cron jobs to run. If unset, run them all.
        token: security token.

    Raises:
        OneOrMoreCronJobsFailedError: if one or more individual cron jobs fail.
        Note: a failure of a single cron job doesn't preclude other cron jobs
        from running.
    """
    del token  # unused, kept for interface compatibility
    leased_jobs = data_store.REL_DB.LeaseCronJobs(cronjob_ids=names, lease_time=rdfvalue.Duration("10m"))
    logging.info("Leased %d cron jobs for processing.", len(leased_jobs))
    if not leased_jobs:
        return
    errors = {}
    processed_count = 0
    # Process in a stable id order so runs are deterministic.
    for job in sorted(leased_jobs, key=lambda j: j.cron_job_id):
        # Kill runs that exceeded their allowed lifetime before scheduling.
        if self.TerminateStuckRunIfNeeded(job):
            continue
        if not self.JobDueToRun(job):
            continue
        try:
            if self.RunJob(job):
                processed_count += 1
            else:
                # Thread pool saturated -- later jobs would also fail to
                # schedule, so stop early.
                logging.info("Can't schedule cron job %s on a thread pool " "(all threads are busy or CPU load is high)", job.cron_job_id)
                break
        except Exception as e:  # pylint: disable=broad-except
            # A single failing job must not stop the rest; collect the error
            # and re-raise everything at the end.
            logging.exception("Cron job %s has failed: %s", job.cron_job_id, e)
            errors[job.cron_job_id] = e
    logging.info("Processed %d cron jobs.", processed_count)
    # Leases are always returned, even when some jobs failed.
    data_store.REL_DB.ReturnLeasedCronJobs(leased_jobs)
    if errors:
        raise OneOrMoreCronJobsFailedError(errors)
def _read_single ( parser , filepath ) :
"""Reads a single config file into the parser , silently failing if the file
does not exist .
Args :
parser ( ConfigParser ) : parser to read the file into .
filepath ( str ) : full path to the config file .""" | from os import path
global packages
if path . isfile ( filepath ) :
parser . readfp ( open ( filepath ) ) |
def filter(self, **kwargs):
    """Narrow the base queryset with the given field lookups.

    Must not be called after primary keys have been pinned.
    Returns self so calls can be chained.
    """
    assert not self._primary_keys
    narrowed = self.queryset.filter(**kwargs)
    self.queryset = narrowed
    return self
def randomreads(reference, length, reads, out_fastq, paired=False, returncmd=False, **kwargs):
    """Wrapper for randomreads.sh (bbmap suite). Assumes the bbmap executables
    are in your $PATH.

    :param reference: Reference fasta. Won't be written to disk by default. If you want it to be, add nodisk='t' as an arg.
    :param length: Length of reads to simulate
    :param reads: Number of reads to simulate
    :param out_fastq: Output file. Should end in .fastq or .fastq.gz
    :param paired: Create paired FASTQ files rather than single
    :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
    :param kwargs: Other arguments to give to bbmap in parameter=argument format. See documentation for full list.
    :return: out and err (and cmd if specified): stdout string and stderr string from running bbmap.
    """
    options = kwargs_to_string(kwargs)
    if paired:
        # Reverse-read file name mirrors the forward one with _R1 -> _R2.
        reverse_fastq = out_fastq.replace('_R1', '_R2')
        cmd = ('randomreads.sh ref={ref} out={out} out2={out2} '
               'length={length} reads={reads} paired=t{options}').format(
            ref=reference, out=out_fastq, out2=reverse_fastq,
            length=length, reads=reads, options=options)
    else:
        cmd = 'randomreads.sh ref={ref} out={out} length={length} reads={reads}{options}'.format(
            ref=reference, out=out_fastq, length=length, reads=reads, options=options)
    out, err = str(), str()
    # Only run the simulation if the output doesn't already exist.
    if not os.path.isfile(out_fastq):
        out, err = accessoryfunctions.run_subprocess(cmd)
    if returncmd:
        return out, err, cmd
    return out, err
def _setable_get_ ( name , self ) :
"Used to raise an exception for attributes unable to be evaluated yet ." | raise AttributeError ( "'{typename}' object has no attribute '{name}'" . format ( typename = type ( self ) . __name__ , name = name ) ) |
def derivative(self, t, n=1):
    """Return the nth derivative of this cubic Bezier segment at t.

    Note: Bezier curves can have points where their derivative vanishes.
    If you are interested in the tangent direction, use the unit_tangent()
    method instead.
    """
    b0, b1, b2, b3 = self.bpoints()
    if n == 1:
        u = 1 - t
        return 3 * (b1 - b0) * u ** 2 + 6 * (b2 - b1) * u * t + 3 * (b3 - b2) * t ** 2
    if n == 2:
        return 6 * ((1 - t) * (b2 - 2 * b1 + b0) + t * (b3 - 2 * b2 + b1))
    if n == 3:
        return 6 * (b3 - 3 * (b2 - b1) - b0)
    if n > 3:
        # All derivatives of a cubic beyond the third vanish identically.
        return 0
    raise ValueError("n should be a positive integer.")
def authors(self):
    """List of :class:`~zenodio.harvest.Author`\\s
    (:class:`zenodio.harvest.Author`).

    Authors correspond to `creators` in the Datacite schema.
    """
    creator_records = _pluralize(self._r['creators'], 'creator')
    return [Author.from_xmldict(record) for record in creator_records]
def exists(self, symbol):
    """Check whether a symbol exists, by name.

    Parameters
    ----------
    symbol : str or Symbol
        The symbol name, or a Symbol object whose name is used.

    Returns
    -------
    bool

    Raises
    ------
    TypeError
        If symbol is neither a str nor a Symbol. (Previously this case
        crashed with an unrelated NameError because ``sym`` was unbound.)
    """
    if isinstance(symbol, str):
        sym = symbol
    elif isinstance(symbol, Symbol):
        sym = symbol.name
    else:
        raise TypeError("symbol must be a str or Symbol, got %r" % type(symbol))
    # count() avoids materializing every matching row just to test existence.
    return self.ses.query(Symbol).filter(Symbol.name == sym).count() > 0
def wait(self, timeout=None):
    # type: (Optional[int]) -> None
    """Wait on the long running operation for a specified length
    of time. You can check if this call has ended with timeout with the
    "done()" method.

    :param int timeout: Period of time to wait for the long running
     operation to complete (in seconds).
    :raises CloudError: Server problem with the query.
    """
    if self._thread is None:
        # Operation already finished (or never started); nothing to wait on.
        return
    self._thread.join(timeout=timeout)
    try:
        # Let's handle possible None in forgiveness here:
        # 'raise None' triggers a TypeError which is swallowed below, so
        # only a genuinely stored exception propagates to the caller.
        raise self._exception  # type: ignore
    except TypeError:  # Was None
        pass
def update_annotation_version(xml_file):
    """Update the fields that have changed over different versions.

    Parameters
    ----------
    xml_file : path to file
        xml file with the sleep scoring

    Notes
    -----
    new in version 4: use 'marker_name' instead of simply 'name' etc.
    new in version 5: use 'bookmark' instead of 'marker'
    """
    with open(xml_file, 'r') as f:
        s = f.read()
    # Read the current version number out of the root <annotations> tag.
    m = search('<annotations version="([0-9]*)">', s)
    current = int(m.groups()[0])
    if current < 4:
        # v3 -> v4: rename marker fields; start and end both take the old <time>.
        s = sub('<marker><name>(.*?)</name><time>(.*?)</time></marker>', '<marker><marker_name>\g<1></marker_name><marker_start>\g<2></marker_start><marker_end>\g<2></marker_end><marker_chan/></marker>', s)
    if current < 5:
        # v4 -> v5: 'marker' becomes 'bookmark' everywhere (plain text replace,
        # so it also rewrites tags produced by the v4 upgrade above).
        s = s.replace('marker', 'bookmark')
        # note indentation: the version bump belongs inside this block.
        s = sub('<annotations version="[0-9]*">', '<annotations version="5">', s)
    with open(xml_file, 'w') as f:
        f.write(s)
def remove_all(self):
    """Remove every registered action, in sorted name order."""
    for action_name in sorted(self._actions_dict):
        self.remove(action_name)
def symbol_search(self, search_terms):
    """Search for symbols matching a set of keywords."""
    self.log.debug('symbol_search: in')
    if not search_terms:
        # Nothing to search for; tell the editor and bail out.
        self.editor.message('symbol_search_symbol_required')
        return
    request = {
        "typehint": "PublicSymbolSearchReq",
        "keywords": search_terms,
        "maxResults": 25,
    }
    self.send_request(request)
def cli(env, identifier):
    """List server credentials."""
    manager = SoftLayer.HardwareManager(env.client)
    hardware_id = helpers.resolve_id(manager.resolve_ids, identifier, 'hardware')
    instance = manager.get_hardware(hardware_id)
    table = formatting.Table(['username', 'password'])
    for component in instance['softwareComponents']:
        if 'passwords' not in component:
            raise exceptions.SoftLayerError("No passwords found in softwareComponents")
        for cred in component['passwords']:
            table.add_row([cred.get('username', 'None'), cred.get('password', 'None')])
    env.fout(table)
def yyparse(self, cfgfile, splitstring=0):
    """Build a PDA from a CFG rule file.

    Args:
        cfgfile (str): The path for the file containing the CFG rules
        splitstring (bool): A boolean for enabling or disabling
            the splitting of symbols using a space
    Returns:
        PDA: The generated PDA
    """
    grammar_rules = self._read_file(cfgfile)
    return self._mpda(grammar_rules, splitstring)
def create_calendar_resource(self, name, password=None, attrs={}):
    """Create a calendar resource.

    :param attrs: a dict of attributes, must specify the displayName and
        zimbraCalResType
    """
    # NOTE(review): the mutable default for ``attrs`` is kept only to
    # preserve the public signature; it is never mutated here.
    attr_list = [{'n': key, '_content': value} for key, value in attrs.items()]
    args = {'name': name, 'a': attr_list}
    if password:
        args['password'] = password
    resp = self.request_single('CreateCalendarResource', args)
    return zobjects.CalendarResource.from_dict(resp)
def get_next_index(self, matrix, manipulation, indices_left):
    """Return an index that should have the most negative effect on the
    matrix sum."""
    factor = manipulation[0]
    candidates = list(indices_left.intersection(manipulation[2]))
    row_sums = np.sum(matrix[candidates], axis=1)
    # For a shrinking factor pick the heaviest row; otherwise the lightest.
    if factor < 1:
        chosen = row_sums.argmax(axis=0)
    else:
        chosen = row_sums.argmin(axis=0)
    return candidates[chosen]
def cortex_to_image_interpolation(obj, mask=None, affine=None, method='linear', shape=None):
    '''cortex_to_image_interpolation(obj) yields a tuple (indices, interp) where indices is a
    tuple of voxel indices and interp is an interpolation matrix that converts a vector of
    cortical surface vertex values into a vector of voxel values with the same ordering as
    those given in indices.

    The argument obj may be either a subject or a cortex or a tuple of (lh, rh) cortices. If
    the obj specifies two cortices, then the interp matrix will be arranged such that the
    vertex-value vector with which interp should be multiplied must first list the LH values
    then the RH values; i.e., image[indices] = interp.dot(join(lh_values, rh_values)).

    The following options are accepted:
      * mask (default: None) specifies the mask of vertices that should be interpolated; if
        this is None, then this will attempt to use the gray_indices of the object if the
        object is a subject, otherwise this is equivalent to 'all'. The special value 'all'
        indicates that all vertices overlapping with the cortex should be interpolated. Mask
        may be given as a boolean mask or as a set or as a tuple/matrix of indices equivalent
        to numpy.where(binary_mask) or its transpose. The indices value that is returned will
        be identical to mask if mask is given in the same format.
      * affine (default: None) specifies the affine transform that is used to align the
        vertices with the voxels. In voxel-space, the voxel with index (i,j,k) is centered at
        the point (i,j,k) and is 1 unit wide in every direction. If None, will attempt to use
        the object's vertex_to_voxel_matrix if object is a subject, otherwise will use a
        FreeSurfer-like orientation that places the vertex origin in the center of the image.
      * shape (default: None) specifies the size of the resulting image as a tuple. If None,
        the function attempts to deduce the correct shape; if obj is a subject, then its
        image_dimensions tuple is used. Otherwise, the size is deduced from the mask, if
        possible; if it cannot be deduced from the mask, then (256, 256, 256) is used.
      * method (default: 'linear') specifies the method to use. May be 'linear', 'heaviest',
        or 'nearest'.
    '''
    # get the min/max values of the coordinates (for deducing sizes, if necessary)
    if mask is None:
        if shape is None and affine is None:
            # we have no way to deduce anything: FreeSurfer-like defaults
            shape = (256, 256, 256)
            affine = ([[-1, 0, 0], [0, 0, -1], [0, 1, 0]], [128, 128, 128])
        elif shape is None:
            shape = (256, 256, 256)
        elif affine is None:
            affine = ([[-1, 0, 0], [0, 0, -1], [0, 1, 0]], np.asarray(shape) / 2)
        mask = obj.gray_indices if isinstance(obj, Subject) else 'all'
    # okay, having handled that no-arg case, lets parse the argument we have
    if pimms.is_matrix(mask):
        # we take this to be the index list; we don't change its order
        if not is_tuple(mask) or len(mask) != 3:
            # BUGFIX: np.int is deprecated/removed in modern numpy; use int.
            mask = np.asarray(mask, dtype=int)
            mask = tuple(mask.T if mask.shape[0] != 3 else mask)
    elif isinstance(mask, colls.Set):
        # we have to convert this into a proper index mask
        mask = np.asarray(list(mask)).T
        tmp = np.full(shape, False)
        tmp[tuple(mask)] = True
        mask = np.where(tmp)
    elif pimms.is_array(mask, dims=3):
        if shape is None:
            shape = mask.shape
        mask = np.where(mask)
    elif pimms.is_str(mask) and mask.lower() == 'all':
        if shape is None:
            shape = (256, 256, 256)
        mask = np.where(np.ones(shape))
    else:
        raise ValueError('Could not understand mask argument')
    # at this point, we can deduce shape from mask and affine from shape
    if shape is None:
        # mask is a tuple of three per-dimension index arrays here, so the
        # image shape is (max index + 1) along each dimension.
        # BUGFIX: the previous np.max(mask, axis=0) collapsed across the three
        # dimension arrays instead of within each, and omitted the +1.
        shape = np.asarray([int(np.max(u)) + 1 for u in mask], dtype=int)
    if affine is None:
        if isinstance(obj, Subject):
            affine = obj.vertex_to_voxel_matrix
        else:
            affine = ([[-1, 0, 0], [0, 0, -1], [0, 1, 0]], np.asarray(shape) / 2)
    affine = to_affine(affine, 3)
    hems = (obj.lh, obj.rh) if isinstance(obj, Subject) else (obj,)
    # all arguments are basically pre-processed; we just need to make the interpolation
    method = 'auto' if method is None else method.lower()
    if method in ['linear', 'auto', 'automatic']:
        interp = [_vertex_to_voxel_linear_interpolation(h, mask, shape, affine) for h in hems]
        interp = interp[0] if len(interp) == 1 else sps.hstack(interp)
    elif method in ['lines', 'line']:
        interp = [_vertex_to_voxel_lines_interpolation(h, mask, shape, affine) for h in hems]
        interp = interp[0] if len(interp) == 1 else sps.hstack(interp)
    elif method in ['heaviest', 'heavy', 'weight', 'weightiest']:
        interp = [_vertex_to_voxel_linear_interpolation(h, mask, shape, affine) for h in hems]
        interp = interp[0] if len(interp) == 1 else sps.hstack(interp)
        # Convert to a binary matrix that picks, per voxel, the single vertex
        # with the heaviest linear-interpolation weight.
        (rs, cs) = interp.shape
        argmaxs = np.asarray(interp.argmax(axis=1))[:, 0]
        interp = sps.csr_matrix((np.ones(rs, dtype=int), (range(rs), argmaxs)),
                                shape=interp.shape, dtype=int)
        # BUGFIX: this branch previously returned the bare matrix instead of
        # the documented (indices, interp) tuple; it now falls through to the
        # common return below.
    elif method in ['nearest', 'near', 'nearest-neighbor', 'nn']:
        aff = npla.inv(affine)
        interp = [_vertex_to_voxel_nearest_interpolation(h, mask, aff) for h in hems]
        interp = interp[0] if len(interp) == 1 else sps.hstack(interp)
    else:
        raise ValueError('unsupported method: %s' % method)
    # That's it; we have the interp matrix and the indices
    return (mask, interp)
def stream_filesystem_node(path, recursive=False, patterns='**', chunk_size=default_chunk_size):
    """Get a buffered generator for streaming either files or directories.

    Returns a buffered generator which encodes the file or directory at the
    given path as :mimetype:`multipart/form-data` with the corresponding
    headers.

    Parameters
    ----------
    path : str
        The filepath of the directory or file to stream
    recursive : bool
        Stream all content within the directory recursively?
    patterns : str | list
        Single *glob* pattern or list of *glob* patterns and compiled
        regular expressions to match the names of the filepaths to keep
    chunk_size : int
        Maximum size of each stream chunk
    """
    treat_as_directory = isinstance(path, six.string_types) and os.path.isdir(path)
    if recursive or treat_as_directory:
        return stream_directory(path, recursive, patterns, chunk_size)
    return stream_files(path, chunk_size)
def convert(amount, source, target='CZK', date=None, percent=0, valid_days_max=None):
    """Convert ``amount`` of ``source`` currency into ``target`` (CZK default).

    Without the target parameter, returns the equivalent of amount+source in
    CZK; with it, the equivalent in the given currency. The calculation can be
    done with regard to a (given) date, and an additional margin can be added
    with the percent parameter. valid_days_max: see rate().
    """
    if source.upper() == 'CZK':
        czk_amount = amount
    else:
        czk_amount = amount * rate(source, date, valid_days_max=valid_days_max)
    converted = convert_to(target, czk_amount, date, valid_days_max=valid_days_max)
    return modified(converted, percent)
def to_png(self, transparent=True, thumbnail_size=None, resampling=None, in_range='dtype', out_range='dtype'):
    """Convert to png format (discarding geo). Optionally also resizes.

    Note: for color images returns interlaced.

    :param transparent: if True - sets alpha channel for nodata pixels
    :param thumbnail_size: if not None - resize to thumbnail size, e.g. 512
    :param in_range: input intensity range
    :param out_range: output intensity range
    :param resampling: one of Resampling enums
    :return bytes
    """
    return self.to_bytes(
        transparent=transparent,
        thumbnail_size=thumbnail_size,
        resampling=resampling,
        in_range=in_range,
        out_range=out_range,
    )
def lookup(self, inc_raw=False, retry_count=3, response=None, get_referral=False,
           extra_blacklist=None, ignore_referral_errors=False, asn_data=None,
           field_list=None, is_offline=False):
    """Retrieve and parse whois information for an IP address via port 43/tcp
    (WHOIS).

    Args:
        inc_raw (bool): Whether to include the raw results in the returned
            dictionary. Defaults to False.
        retry_count (int): The number of times to retry in case socket errors,
            timeouts, connection resets, etc. are encountered. Defaults to 3.
        response (str): Optional response object; this bypasses the NIR
            lookup. Required when is_offline=True.
        get_referral (bool): Whether to retrieve referral whois information,
            if available. Defaults to False.
        extra_blacklist (list): Blacklisted whois servers in addition to the
            global BLACKLIST. Defaults to None.
        ignore_referral_errors (bool): Whether to ignore and continue when an
            exception is encountered on referral whois lookups. Defaults to
            False.
        asn_data (dict): Result from ipwhois.asn.IPASN.lookup (required).
        field_list (list of str): If provided, fields to parse. Defaults to
            ['name', 'handle', 'description', 'country', 'state', 'city',
            'address', 'postal_code', 'emails', 'created', 'updated'].
        is_offline (bool): Whether to perform lookups offline. If True,
            response and asn_data must be provided. Primarily for testing.

    Returns:
        dict: The IP whois lookup results with keys 'query', 'nets', 'raw',
        'referral' and 'raw_referral'. 'nets' is a list of dicts with the
        fields listed in the ipwhois.whois.RIR_WHOIS dictionary; 'referral'
        follows ipwhois.whois.RWHOIS.
    """
    # Create the return dictionary.
    results = {'query': self._net.address_str, 'nets': [], 'raw': None,
               'referral': None, 'raw_referral': None}
    # The referral server and port. Only used if get_referral is True.
    referral_server = None
    referral_port = 0
    # Only fetch the response if we haven't already.
    # BUGFIX: "is not 'arin'" compared identity with a string literal, which
    # is implementation-dependent (and a SyntaxWarning on Python >= 3.8);
    # use != for a value comparison.
    if response is None or (not is_offline and asn_data['asn_registry'] != 'arin'):
        log.debug('Response not given, perform WHOIS lookup for {0}'.format(self._net.address_str))
        # Retrieve the whois data.
        response = self._net.get_whois(asn_registry=asn_data['asn_registry'],
                                       retry_count=retry_count,
                                       extra_blacklist=extra_blacklist)
        if get_referral:
            # Search for a referral server.
            for match in re.finditer(r'^ReferralServer:[^\S\n]+(.+:[0-9]+)$', response, re.MULTILINE):
                try:
                    temp = match.group(1)
                    if 'rwhois://' not in temp:  # pragma: no cover
                        raise ValueError
                    temp = temp.replace('rwhois://', '').split(':')
                    if int(temp[1]) > 65535:  # pragma: no cover
                        raise ValueError
                    referral_server = temp[0]
                    referral_port = int(temp[1])
                except (ValueError, KeyError):  # pragma: no cover
                    continue
                break
    # Retrieve the referral whois data.
    if get_referral and referral_server:
        log.debug('Perform referral WHOIS lookup')
        response_ref = None
        try:
            response_ref = self._net.get_whois(asn_registry='', retry_count=retry_count,
                                               server=referral_server, port=referral_port,
                                               extra_blacklist=extra_blacklist)
        except (BlacklistError, WhoisLookupError):
            if ignore_referral_errors:
                pass
            else:
                raise
        if response_ref:
            log.debug('Parsing referral WHOIS data')
            if inc_raw:
                results['raw_referral'] = response_ref
            temp_rnet = self.parse_fields(response_ref, RWHOIS['fields'], field_list=field_list)
            # Add the referral network to the return dictionary.
            results['referral'] = temp_rnet
    # If inc_raw parameter is True, add the response to return dictionary.
    if inc_raw:
        results['raw'] = response
    nets = []
    if asn_data['asn_registry'] == 'arin':
        nets_response = self.get_nets_arin(response)
    elif asn_data['asn_registry'] == 'lacnic':
        nets_response = self.get_nets_lacnic(response)
    else:
        nets_response = self.get_nets_other(response)
    nets.extend(nets_response)
    # Iterate through all of the network sections and parse out the
    # appropriate fields for each.
    log.debug('Parsing WHOIS data')
    for index, net in enumerate(nets):
        section_end = None
        if index + 1 < len(nets):
            section_end = nets[index + 1]['start']
        try:
            # BUGFIX: previously looked up results['asn_registry'], a key that
            # is never set on the results dict, so dt_format was always None;
            # the registry comes from asn_data.
            dt_format = RIR_WHOIS[asn_data['asn_registry']]['dt_format']
        except KeyError:
            dt_format = None
        temp_net = self.parse_fields(response, RIR_WHOIS[asn_data['asn_registry']]['fields'],
                                     section_end, net['end'], dt_format, field_list)
        # Merge the net dictionaries.
        net.update(temp_net)
        # The start and end values are no longer needed.
        del net['start'], net['end']
    # Add the networks to the return dictionary.
    results['nets'] = nets
    return results
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a (`connect timeout, read
        timeout <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param verify: (optional) Whether to verify SSL certificates.
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    """
    conn = self.get_connection(request.url, proxies)
    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(request)
    # A request is sent chunked when it has a body but no Content-Length.
    chunked = not (request.body is None or 'Content-Length' in request.headers)
    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError as e:
            # this may raise a string formatting error.
            err = ("Invalid timeout {0}. Pass a (connect, read) " "timeout tuple, or a single float to set " "both timeouts to the same value".format(timeout))
            raise ValueError(err)
    else:
        timeout = TimeoutSauce(connect=timeout, read=timeout)
    try:
        if not chunked:
            # Send the request.
            resp = conn.urlopen(method=request.method, url=url, body=request.body,
                                headers=request.headers, redirect=False,
                                assert_same_host=False, preload_content=False,
                                decode_content=False,
                                retries=Retry(self.max_retries, read=False),
                                timeout=timeout)
        else:
            # Chunked transfer: drive the low-level connection by hand.
            if hasattr(conn, 'proxy_pool'):
                conn = conn.proxy_pool
            low_conn = conn._get_conn(timeout=timeout)
            try:
                low_conn.putrequest(request.method, url, skip_accept_encoding=True)
                for header, value in request.headers.items():
                    low_conn.putheader(header, value)
                low_conn.endheaders()
                # Emit each body chunk in HTTP/1.1 chunked framing:
                # hex length, CRLF, data, CRLF; terminated by a zero chunk.
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')
                r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(r, pool=conn, connection=low_conn,
                                                 preload_content=False,
                                                 decode_content=False)
            except:
                # If we hit any problems here, clean up the connection.
                # Then, reraise so that we can handle the actual exception.
                low_conn.close()
                raise
            else:
                # All is well, return the connection to the pool.
                conn._put_conn(low_conn)
    except (ProtocolError, socket.error) as err:
        raise ConnectionError(err, request=request)
    except MaxRetryError as e:
        if isinstance(e.reason, ConnectTimeoutError):
            raise ConnectTimeout(e, request=request)
        raise ConnectionError(e, request=request)
    except _ProxyError as e:
        raise ProxyError(e)
    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        else:
            raise
    return self.build_response(request, resp)
def delete_attachment(self, attachment, headers=None):
    """Removes an attachment from a remote document and refreshes the locally
    cached document object.

    :param str attachment: Attachment file name used to identify the
        attachment.
    :param dict headers: Optional, additional headers to be sent
        with request.

    :returns: Attachment deletion status in JSON format
    """
    # need latest rev
    self.fetch()
    attachment_url = '/'.join((self.document_url, attachment))
    # The If-Match header must carry the current revision for the server to
    # accept the delete.
    if headers is None:
        headers = {'If-Match': self['_rev']}
    else:
        headers['If-Match'] = self['_rev']
    resp = self.r_session.delete(attachment_url, headers=headers)
    resp.raise_for_status()
    # Update the cached _rev directly via the base class, bypassing this
    # class's __setitem__ override.
    super(Document, self).__setitem__('_rev', response_to_json_dict(resp)['rev'])
    # Execute logic only if attachment metadata exists locally
    if self.get('_attachments'):
        # Remove the attachment metadata for the specified attachment
        if self['_attachments'].get(attachment):
            self['_attachments'].__delitem__(attachment)
        # Remove empty attachment metadata from the local dictionary
        if not self['_attachments']:
            super(Document, self).__delitem__('_attachments')
    return response_to_json_dict(resp)
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold, extra_threshold, lower_threshold, second_db=None):
    """Returns ids that match a seq, using a 2-tiered strategy.

    First iterates the search with the strict core_threshold until n ids are
    found (or the id count stops growing), then relaxes the thresholds —
    optionally using a second database for the second search — and continues
    from the last checkpoint.
    """
    # first time through: reset 'h' and 'e' to core
    # - h is the e-value threshold for including seqs in the score matrix model
    app.Parameters['-h'].on(core_threshold)
    # - e is the e-value threshold for the final blast
    app.Parameters['-e'].on(core_threshold)
    checkpoints = []
    ids = []
    last_num_ids = None
    for i in range(max_iterations):
        if checkpoints:
            # resume from the most recent checkpoint
            app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        # if we didn't write a checkpoint, bail out
        if not access(curr_check, F_OK):
            break
        # if we got here, we wrote a checkpoint file
        checkpoints.append(curr_check)
        result = list(output.get('BlastOut', output['StdOut']))
        output.cleanUp()
        if result:
            ids = LastProteinIds9(result, keep_values=True, filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            if num_ids == last_num_ids:
                # converged: no new ids on this iteration
                break
            last_num_ids = num_ids
    # if we didn't write any checkpoints, second run won't work, so return ids
    if not checkpoints:
        return ids
    # if we got too many ids and don't have a second database, return the ids we got
    # NOTE(review): num_ids is only bound inside the `if result:` branch above;
    # if no iteration produced a result this line would raise NameError — verify.
    if (not second_db) and num_ids >= n:
        return ids
    # second time through: reset 'h' and 'e' to get extra hits, and switch the
    # database if appropriate
    app.Parameters['-h'].on(extra_threshold)
    app.Parameters['-e'].on(lower_threshold)
    if second_db:
        app.Parameters['-d'].on(second_db)
    for i in range(max_iterations):
        # will always have last_check if we get here
        app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_b_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        # bail out if we couldn't write a checkpoint
        if not access(curr_check, F_OK):
            break
        # if we got here, the checkpoint worked
        checkpoints.append(curr_check)
        # NOTE(review): unlike the first loop, output.cleanUp() is not called
        # here — possibly leaking temporary files; confirm intent.
        result = list(output.get('BlastOut', output['StdOut']))
        if result:
            ids = LastProteinIds9(result, keep_values=True, filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            if num_ids == last_num_ids:
                break
            last_num_ids = num_ids
    # return the ids we got. may not be as many as we wanted.
    for c in checkpoints:
        remove(c)
    return ids
def assert_stmt(self):
    '''Returns a string displaying the whole statement that failed,
    with a '>' indicator on the line starting the expression.
    '''
    # This will be used by linecache to read the source of this
    # module. See the docstring for _find_assert_stmt below which
    # explains how.
    # We don't have a test for this because automating the
    # creation of an egg, installation into an environment,
    # running of tests, and verification that marbles found the
    # right source and was able to print it is a lot of
    # automation. We have tested manually, and marbles works with
    # all check installation mechanisms we know of right now
    # (setup.py install, setup.py develop, pip install, bdist_egg,
    # bdist_wheel).
    module_globals = vars(sys.modules[self.module])
    line_range, lineno = self._find_assert_stmt(self.filename, self.linenumber, module_globals=module_globals)
    source = [linecache.getline(self.filename, x, module_globals=module_globals) for x in line_range]
    # Dedent the source, removing the final newline added by dedent
    dedented_lines = textwrap.dedent(''.join(source)).split('\n')[:-1]
    formatted_lines = []
    for i, line in zip(line_range, dedented_lines):
        # Mark the line that starts the failing assert with '>'.
        prefix = '>' if i == lineno else ' '
        formatted_lines.append(' {0} {1:4d} {2}'.format(prefix, i, line))
    return '\n'.join(formatted_lines)
def convolve(sequence, rule, **kwds):
    """Wrapper around scipy.ndimage.convolve1d that allows complex input."""
    dtype = np.result_type(float, np.ravel(sequence)[0])
    arr = np.asarray(sequence, dtype=dtype)
    if not np.iscomplexobj(arr):
        return convolve1d(arr, rule, **kwds)
    # convolve1d only handles real data: filter the parts separately.
    real_part = convolve1d(arr.real, rule, **kwds)
    imag_part = convolve1d(arr.imag, rule, **kwds)
    return real_part + 1j * imag_part
def process_question(self):
    """Parse fields in ``self.raw_data`` that need their image strings
    processed: replaced by references to ``CONTENTSTORAGE`` and added as
    files.

    Returns: list of all filenames needed to render this question.
    """
    files = []
    data = json.loads(self.raw_data)
    # Process urls for widgets first.
    self._recursive_url_find(data, files)
    # The question itself may carry images.
    if 'question' in data and 'images' in data['question']:
        data['question']['images'], new_files = self.process_image_field(data['question'])
        files += new_files
    # So may every hint and every answer.
    for section in ('hints', 'answers'):
        for entry in data.get(section, []):
            if 'images' in entry:
                entry['images'], new_files = self.process_image_field(entry)
                files += new_files
    # Write the processed structure back to raw_data.
    self.raw_data = json.dumps(data, ensure_ascii=False)
    # Assume no need for special HTML processing for Perseus questions.
    # This avoids problems with questions that contain < and > inequalities
    # in formulas that would get erroneously parsed as HTML tags.
    self.raw_data, data_files = super(PerseusQuestion, self).set_images(
        self.raw_data, parse_html=False)
    # Combine all files processed and return their names.
    self.files = files + data_files
    return [f.filename for f in self.files]
def delete_question_answer(self, number: str) -> bool:
    """Delete the homework submission for a specific question.

    :param number: the question/homework identifier (sent as ``title``).
    :return: True if the server confirmed the deletion, False otherwise
        (including when the request timed out).
    """
    try:
        # Parameters required by the delete endpoint.
        params = {'title': number}
        # NOTE(review): verify=False disables TLS certificate checking;
        # acceptable only if the target host is trusted — confirm.
        response = self.__session.get(
            self.__url + '/delHw', params=params, timeout=0.5, verify=False)
        soup = BeautifulSoup(response.text, 'html.parser')
        # The endpoint answers with a plain-text confirmation in the body.
        return soup.find('body').get_text().replace('\n', '').strip() == 'delete success'
    except requests.exceptions.Timeout:
        # Previously returned None here, contradicting the declared ``bool``
        # return type; a timed-out delete is now reported as failure.
        return False
def _pred_sets_are_in_conflict ( pred_set_a , pred_set_b ) :
"""Find conflict in sets , return conflict if found , else None .""" | # pred _ sets conflict if we cannot find one shared predicate that is " negated " in one
# and " non - negated " in the other
for pred_a , bool_a in pred_set_a :
for pred_b , bool_b in pred_set_b :
if pred_a is pred_b and bool_a != bool_b :
return False
return True |
def get_global_achievement_percentages_for_app(self, gameID, format=None):
    """Request statistics showing global achievements that have been
    unlocked.

    gameID: The id of the game.
    format: Return format. None defaults to json. (json, xml, vdf)
    """
    parameters = {'gameid': gameID}
    if format is not None:
        parameters['format'] = format
    request_url = self.create_request_url(
        self.interface, 'GetGlobalAchievementPercentagesForApp', 2,
        parameters)
    response = self.retrieve_request(request_url)
    return self.return_data(response, format=format)
def get_field_template(self, bound_field, template_name=None):
    """Use a special field template for widgets with multiple inputs.

    Only applies when no template other than the default one has been
    chosen for this field.
    """
    template_name = super().get_field_template(bound_field, template_name)
    multi_input_widgets = (forms.RadioSelect, forms.CheckboxSelectMultiple)
    is_default = template_name == self.field_template
    if is_default and isinstance(bound_field.field.widget, multi_input_widgets):
        return 'tapeforms/fields/foundation_fieldset.html'
    return template_name
def flags(self, index):
    """Return the item flags for the given index as an OR-ed value,
    e.g. Qt.ItemIsUserCheckable | Qt.ItemIsEditable.

    Args:
        index (QtCore.QModelIndex): index defining column and row.
    Returns:
        Qt.NoItemFlags for an invalid index; otherwise selectable+enabled,
        plus editable when the model is editable and the column is not the
        first one.
    """
    if not index.isValid():
        return Qt.NoItemFlags
    if index.column() > 0 and self.editable():
        return Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
    return Qt.ItemIsEnabled | Qt.ItemIsSelectable
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
    """Return all primary datasets matching the given name/type patterns;
    empty arguments (the defaults) match everything.
    """
    conn = self.dbi.connection()
    try:
        # The connection is released exactly once, in the finally block.
        # The previous version also closed it inside the try body, closing
        # it a second time in finally on the success path.
        return self.primdslist.execute(conn, primary_ds_name, primary_ds_type)
    finally:
        if conn:
            conn.close()
def saved_groups(self):
    """Return True if a saved groups file exists (and is readable), else
    False. The result is cached in ``self._saved_groups``.
    """
    if self._saved_groups is not None:
        return self._saved_groups
    self._saved_groups = False
    saved_file = os.path.join(self.tcex.args.tc_temp_path, 'groups-saved')
    # Only honour the file when the saved-file feature is enabled and the
    # file is actually readable.
    if (self.enable_saved_file
            and os.path.isfile(saved_file)
            and os.access(saved_file, os.R_OK)):
        self._saved_groups = True
        self.tcex.log.debug('groups-saved file found')
    return self._saved_groups
def get_column_info(connection, table_name):
    """Return an in-order list of (name, type) tuples describing the
    columns in the given table.
    """
    cursor = connection.cursor()
    cursor.execute(
        "SELECT sql FROM sqlite_master WHERE type == 'table' AND name == ?",
        (table_name,))
    statement, = cursor.fetchone()
    # Pull the column-definition section out of the CREATE TABLE statement,
    # then split it into individual column definitions.
    coldefs = re.match(_sql_create_table_pattern, statement).groupdict()["coldefs"]
    columns = []
    for match in re.finditer(_sql_coldef_pattern, coldefs):
        parsed = match.groupdict()
        # Skip table-level constraints that parse like column definitions.
        if parsed["name"].upper() in ("PRIMARY", "UNIQUE", "CHECK"):
            continue
        columns.append((parsed["name"], parsed["type"]))
    return columns
def model_schema(model: Type['main.BaseModel'], by_alias: bool = True,
                 ref_prefix: Optional[str] = None) -> Dict[str, Any]:
    """Generate a JSON Schema for one model, with all the sub-models under
    the top-level ``definitions`` JSON key.

    :param model: a Pydantic model (a class that inherits from BaseModel)
    :param by_alias: generate the schemas using the aliases defined, if any
    :param ref_prefix: the JSON Pointer prefix for schema references with
        ``$ref``; if None, defaults to ``#/definitions/``. Override it
        (e.g. with ``#/components/schemas/`` for OpenAPI) when the
        definitions live elsewhere — the generated schemas themselves stay
        under the top-level ``definitions`` key, only the references use
        the prefix.
    :return: dict with the JSON Schema for the passed ``model``
    """
    prefix = ref_prefix or default_prefix
    name_map = get_model_name_map(get_flat_models_from_model(model))
    schema, definitions = model_process_schema(
        model, by_alias=by_alias, model_name_map=name_map, ref_prefix=prefix)
    if definitions:
        schema['definitions'] = definitions
    return schema
def read_band(self, key, info):
    """Read the data for one band as a dask array.

    Memory-maps the payload after the file headers, decodes it according
    to the advertised bits-per-pixel (16-bit big-endian, plain 8-bit, or
    packed 10-bit expanded via ``dec10216``), and reshapes it to
    (lines, columns).

    :raises ValueError: for an unsupported bits-per-pixel value.
    """
    # data_field_length is given in bits; convert to bytes.
    length = int(np.ceil(self.mda['data_field_length'] / 8.))
    bits = self.mda['number_of_bits_per_pixel']
    if bits == 16:
        dtype = '>u2'
        length //= 2
    elif bits in [8, 10]:
        # 10-bit data is read as raw bytes and expanded below.
        dtype = np.uint8
    else:
        # Previously an unsupported value fell through with ``dtype``
        # undefined (NameError); fail explicitly instead.
        raise ValueError(
            'Unsupported number of bits per pixel: {}'.format(bits))
    # Tuple-ize unconditionally: the old code only did this in the 8/10-bit
    # branch, so the 16-bit path crashed on ``shape[0]`` below.
    shape = (length, )
    data = np.memmap(self.filename, mode='r',
                     offset=self.mda['total_header_length'],
                     dtype=dtype, shape=shape)
    data = da.from_array(data, chunks=shape[0])
    if bits == 10:
        data = dec10216(data)
    data = data.reshape((self.mda['number_of_lines'],
                         self.mda['number_of_columns']))
    return data
# NOTE(review): the name misspells "management"; kept as-is because callers
# reference it by this name.
def _queue_manangement_worker(executor_reference, processes, pending_work_items,
                              work_ids_queue, call_queue, result_queue,
                              shutdown_process_event):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
        shutdown_process_event: A multiprocessing.Event used to signal the
            process workers that they should exit when their work queue is
            empty.
    """
    while True:
        # Feed the workers as many pending calls as the call queue accepts.
        _add_call_item_to_queue(pending_work_items, work_ids_queue, call_queue)
        try:
            # Short timeout so shutdown conditions are re-checked regularly.
            result_item = result_queue.get(block=True, timeout=0.1)
        except queue.Empty:
            executor = executor_reference()
            # No more work items can be added if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns this worker has been collected OR
            #   - The executor that owns this worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown_thread:
                # Since no new work items can be added, it is safe to shutdown
                # this thread if there are no pending work items.
                if not pending_work_items:
                    shutdown_process_event.set()
                    # If .join() is not called on the created processes then
                    # some multiprocessing.Queue methods may deadlock on
                    # Mac OS X.
                    for p in processes:
                        p.join()
                    return
            # Drop the strong reference so the executor stays collectable
            # while we keep looping.
            del executor
        else:
            # Hand the finished result back on the work item's event loop.
            work_item = pending_work_items.pop(result_item.work_id)
            loop = work_item.loop
            loop.call_from_thread(_set_work_result, work_item, result_item)
            # Delete references so the objects can be garbage collected
            # before the next blocking wait.
            del result_item, work_item, loop
def get_roster_file(options):
    '''Find the respective roster file.

    :param options: Salt SSH options dictionary.
    :return: path to an existing, readable roster file.
    '''
    template = None
    # __disable_custom_roster is always True when the Salt SSH client is
    # driven through the Salt API. In that case there is no way to define an
    # own 'roster_file'; instead the file has to be chosen from the already
    # validated rosters (see /etc/salt/master config).
    if options.get('__disable_custom_roster') and options.get('roster_file'):
        wanted = options.get('roster_file').strip('/')
        for location in options.get('rosters'):
            candidate = os.path.join(location, wanted)
            if os.path.isfile(candidate):
                template = candidate
                break
        del options['roster_file']
    if not template:
        if options.get('roster_file'):
            template = options.get('roster_file')
        elif 'config_dir' in options.get('__master_opts__', {}):
            template = os.path.join(options['__master_opts__']['config_dir'], 'roster')
        elif 'config_dir' in options:
            template = os.path.join(options['config_dir'], 'roster')
        else:
            template = os.path.join(salt.syspaths.CONFIG_DIR, 'roster')
    if not os.path.isfile(template):
        raise IOError('Roster file "{0}" not found'.format(template))
    if not os.access(template, os.R_OK):
        raise IOError('Access denied to roster "{0}"'.format(template))
    return template
def upgradePrivateApplication3to4(old):
    """Upgrade L{PrivateApplication} from schema version 3 to schema
    version 4.

    Copy all existing attributes to the new version and use the
    L{PrivateApplication} to power up the item it is installed on for
    L{ITemplateNameResolver}.
    """
    carried = dict(
        preferredTheme=old.preferredTheme,
        privateKey=old.privateKey,
        website=old.website,
        customizedPublicPage=old.customizedPublicPage,
        authenticationApplication=old.authenticationApplication,
        preferenceAggregator=old.preferenceAggregator,
        defaultPreferenceCollection=old.defaultPreferenceCollection,
        searchAggregator=old.searchAggregator,
    )
    new = old.upgradeVersion(PrivateApplication.typeName, 3, 4, **carried)
    # Almost certainly this would be more correctly expressed as
    # installedOn(new).powerUp(...), however the 2 to 3 upgrader failed to
    # translate the installedOn attribute to state which installedOn can
    # recognize, consequently installedOn(new) will return None for an item
    # which was created at schema version 2 or earlier. It's not worth
    # dealing with this inconsistency, since PrivateApplication is always
    # only installed on its store. -exarkun
    new.store.powerUp(new, ITemplateNameResolver)
    return new
def projects(self, task, params=None, **options):
    """Returns a compact representation of all of the projects the task
    is in.

    Parameters
    ----------
    task : {Id} The task to get projects on.
    [params] : {Object} Parameters for the request
    """
    # A fresh dict per call avoids the shared-mutable-default pitfall the
    # previous ``params={}`` default had.
    if params is None:
        params = {}
    path = "/tasks/%s/projects" % (task)
    return self.client.get_collection(path, params, **options)
def ConsumeCommentOrTrailingComment ( self ) :
"""Consumes a comment , returns a 2 - tuple ( trailing bool , comment str ) .""" | # Tokenizer initializes _ previous _ line and _ previous _ column to 0 . As the
# tokenizer starts , it looks like there is a previous token on the line .
just_started = self . _line == 0 and self . _column == 0
before_parsing = self . _previous_line
comment = self . ConsumeComment ( )
# A trailing comment is a comment on the same line than the previous token .
trailing = ( self . _previous_line == before_parsing and not just_started )
return trailing , comment |
def get_libraries():
    '''Return a list of libraries to link against.'''
    # libcsamtools.so is deliberately excluded: it has numerous name
    # conflicts with libchtslib.so.
    here = os.path.abspath(os.path.join(os.path.dirname(__file__)))
    names = ['libctabixproxies', 'libcfaidx', 'libcsamfile',
             'libcvcf', 'libcbcf', 'libctabix']
    if pysam.config.HTSLIB == "builtin":
        names.append('libchtslib')
    suffix = sysconfig.get_config_var('SO')
    return [os.path.join(here, name + suffix) for name in names]
def get_interface_detail_output_interface_ifHCOutMulticastPkts(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    # Build the leaf elements in schema order from the keyword arguments.
    for tag, kwarg in (("interface-type", 'interface_type'),
                       ("interface-name", 'interface_name'),
                       ("ifHCOutMulticastPkts", 'ifHCOutMulticastPkts')):
        leaf = ET.SubElement(interface, tag)
        leaf.text = kwargs.pop(kwarg)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _move_mount(robot, mount, point):
    """The carriage moves the mount in the Z axis, and the gantry moves in
    X and Y.

    Mount movements do not have the same protections calculated in to an
    existing `move` command like Pipette does, so the safest thing is to
    home the Z axis, then move in X and Y, then move down to the specified
    Z height.
    """
    carriage = robot._actuators[mount]['carriage']
    # Home both carriages, to prevent collisions and to ensure that the
    # other mount doesn't block the one being moved (mount moves are
    # primarily for changing pipettes, so we don't want the other pipette
    # blocking access).
    robot.poses = carriage.home(robot.poses)
    other_mount = 'right' if mount == 'left' else 'left'
    robot.poses = robot._actuators[other_mount]['carriage'].home(robot.poses)
    target_x, target_y, target_z = point[0], point[1], point[2]
    robot.gantry.move(robot.poses, x=target_x, y=target_y)
    robot.poses = carriage.move(robot.poses, z=target_z)
    # These x and y values are hard to interpret because of some internals
    # of pose tracker. It's mostly z that matters for this operation anyway.
    x, y, _ = tuple(pose_tracker.absolute(
        robot.poses, robot._actuators[mount]['carriage']))
    _, _, z = tuple(pose_tracker.absolute(robot.poses, robot.gantry))
    return "Move complete. New position: {}".format((x, y, z))
def fix_config(self, options):
    """Fixes the options, if necessary. I.e., it adds all required elements
    to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(LinePlot, self).fix_config(options)
    # (option name, default value, help text) — replaces six copy-pasted
    # blocks with one data-driven loop; defaults and texts are unchanged.
    defaults = [
        ("attributes", None,
         "The list of 0-based attribute indices to print; None for all (int)."),
        ("percent", 100.0,
         "The percentage of the data to display (0-100, float)."),
        ("seed", 1,
         "The seed value for randomizing the plot when viewing a subset (int)."),
        ("title", None,
         "The title for the plot (str)."),
        ("outfile", None,
         "The file to store the plot in (str)."),
        ("wait", True,
         "Whether to wait for user to close the plot window (bool)."),
    ]
    for opt, default, help_text in defaults:
        if opt not in options:
            options[opt] = default
        if opt not in self.help:
            self.help[opt] = help_text
    return options
def set_fixed_mask(self, image):
    """Set the fixed ANTsImage mask for the metric.

    :param image: mask image; must be an ANTsImage whose dimensionality
        matches this metric's.
    :raises ValueError: if ``image`` is not an ANTsImage or its dimension
        differs from the metric's.
    """
    if not isinstance(image, iio.ANTsImage):
        raise ValueError('image must be ANTsImage type')
    if image.dimension != self.dimension:
        raise ValueError('image dim (%i) does not match metric dim (%i)' % (image.dimension, self.dimension))
    # NOTE(review): presumably the second argument (True) flags this image
    # as a mask in the underlying binding — confirm against _metric.
    self._metric.setFixedImage(image.pointer, True)
    self.fixed_mask = image
def reverse(self):
    """In place reverses the list. Very expensive on large data sets.

    The reversed list will be persisted to the redis :prop:_client
    as well.
    """
    # Build the reversed content in a scratch RedisList under a
    # randomly-named key, then RENAME it over this list's key.
    # NOTE(review): the random integer name can in principle collide with
    # an existing key — confirm this is acceptable for the use case.
    tmp_list = RedisList(randint(0, 100000000), prefix=self.key_prefix, client=self._client, serializer=self.serializer, serialized=self.serialized)
    cursor = '0'
    count = 1000
    start = (-1 * count)
    stop = -1
    _loads = self._loads
    # Walk the source list backwards in chunks of `count` items. LRANGE
    # returns an empty list once `start` has moved past the head, which
    # makes `cursor` falsy and ends the loop.
    while cursor:
        cursor = self._client.lrange(self.key_prefix, start, stop)
        if cursor:
            # Each chunk is reversed locally before being appended, so the
            # scratch list accumulates the fully reversed sequence.
            tmp_list.extend(map(_loads, reversed(cursor)))
        start -= count
        stop -= count
    # Atomically replace the original key with the reversed copy, then
    # drop whatever the scratch list still references.
    self._client.rename(tmp_list.key_prefix, self.key_prefix)
    tmp_list.clear()
def dump_uint_b_into(n, width, buffer, offset=0):
    """Serialize a fixed-width unsigned integer into ``buffer``.

    Writes ``width`` bytes starting at ``offset``, least-significant byte
    first; bits of ``n`` beyond ``width`` bytes are silently dropped.

    :param n: integer value to serialize
    :param width: number of bytes to write
    :param buffer: mutable byte buffer to write into
    :param offset: index of the first byte written
    :return: the same ``buffer``
    """
    value = n
    for pos in range(offset, offset + width):
        buffer[pos] = value & 0xff
        value >>= 8
    return buffer
def assertStructIsInline(self, obj):
    """Structs are always stored inline, so they need to be created right
    where they are used. You'll get this error if you created one
    elsewhere.
    """
    N.enforce_number(obj, N.UOffsetTFlags)
    if obj == self.Offset():
        return
    raise StructIsNotInlineError(
        "flatbuffers: Tried to write a Struct at an Offset that "
        "is different from the current Offset of the Builder.")
def swapon(name, priority=None):
    '''Activate a swap disk

    .. versionchanged:: 2016.3.2

    CLI Example:

    .. code-block:: bash

        salt '*' mount.swapon /root/swapfile
    '''
    ret = {}
    active = swaps()
    if name in active:
        # Already active: report the existing stats, nothing new was done.
        return {'stats': active[name], 'new': False}
    if __grains__['kernel'] == 'SunOS':
        if __grains__['virtual'] == 'zone':
            # Swap cannot be managed from inside a zone.
            return False
        __salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
    else:
        cmd = 'swapon {0}'.format(name)
        # AIX's swapon has no priority flag.
        if priority and 'AIX' not in __grains__['kernel']:
            cmd += ' -p {0}'.format(priority)
        __salt__['cmd.run'](cmd, python_shell=False)
    # Re-read the swap table to confirm the device was activated.
    active = swaps()
    if name in active:
        ret['stats'] = active[name]
        ret['new'] = True
    return ret
def _decode_preferred_encoding ( s ) :
"""Decode the supplied byte string using the preferred encoding
for the locale ( ` locale . getpreferredencoding ` ) or , if the default encoding
is invalid , fall back first on utf - 8 , then on latin - 1 if the message cannot
be decoded with utf - 8.""" | enc = locale . getpreferredencoding ( )
try :
try :
return s . decode ( enc )
except LookupError :
enc = _DEFAULT_ENCODING
return s . decode ( enc )
except UnicodeDecodeError :
return s . decode ( 'latin-1' ) |
def copy_file(src, dest):
    """Copy a single file (with metadata) from ``src`` to ``dest``.

    Errors are printed instead of raised, keeping the copy best-effort
    for batch callers.
    """
    try:
        shutil.copy2(src, dest)
    except Exception as ex:
        # Keep the broad catch (best-effort semantics), but separate the
        # message from the exception text — previously they ran together
        # as 'ERROR copying file<exc>'.
        print('ERROR copying file: ' + str(ex))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.