| signature | implementation |
|---|---|
def try_ntimes(_howmany, func, *argv, **kwarg):
    """Call ``func(*argv, **kwarg)`` up to ``_howmany`` times.

    Returns the result of the first successful call.  If every attempt
    raises, the exception raised by the last attempt is re-raised.

    :param _howmany: maximum number of attempts; must be an int >= 1.
    :param func: the callable to invoke.
    :param argv: positional arguments forwarded to ``func``.
    :param kwarg: keyword arguments forwarded to ``func``.
    :raises ValueError: if ``_howmany`` is not a positive integer.
    """
    if (not isinstance(_howmany, int)) or (_howmany < 1):
        # ValueError is more precise than a bare Exception and is still
        # caught by any caller handling Exception.
        raise ValueError("'_howmany' argument has to be int and greater than 0")
    last_exception = None
    for _ in range(_howmany):
        try:
            return func(*argv, **kwarg)
        except Exception as e:
            last_exception = e
    # The loop ran at least once (validated above), so this is always set.
    raise last_exception
async def init(self) -> None:
    """Initialize configuration and start background tasks.

    Awaited once at startup: registers the stats and configuration
    objects, lazily creates the thread-pool executor, schedules the
    initial tasks, and starts the task monitor.
    """
    # insert() presumably registers the object and returns the managed
    # instance -- TODO confirm against the definition of insert().
    self.stats = await self.insert(self.stats)
    self.configuration = await self.insert(self.configuration)
    if not self.executor:
        try:
            max_workers = self.config.get('executor_workers')
        except Exception:
            # Missing/unreadable config: let ThreadPoolExecutor pick
            # its own default worker count.
            max_workers = None
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
    for task in self.initial_tasks:
        await self.insert(task)
    # Keep the monitor future so it can be cancelled on shutdown.
    self.monitor = asyncio.ensure_future(self.monitor_tasks())
    self.counters['alive_since'] = time.time()
def getmakeidfobject(idf, key, name):
    """Return the IDF object ``name`` of type ``key``, creating it if absent."""
    existing = idf.getobject(key, name)
    if existing:
        return existing
    return idf.newidfobject(key, Name=name)
def animate(self, filename='constellation.mp4', epochs=[1900, 2100], dt=5, dpi=300, fps=10, **kw):
    '''Animate a finder chart over a span of epochs.

    Parameters
    ----------
    filename : str
        Output file; a name containing '.gif' selects a GIF writer
        (pillow, falling back to imagemagick), anything else uses ffmpeg.
    epochs : list
        [start, end] epochs to animate between.
    dt : number
        Epoch step between frames.
    dpi : int
        Resolution passed to the movie writer.
    fps : int
        Frames per second of the output animation.
    **kw :
        Extra keyword arguments forwarded to self.finder().

    NOTE(review): the mutable default ``epochs=[1900, 2100]`` is shared
    across calls; it is only read here so it is harmless, but a tuple
    default would be safer.
    '''
    scatter = self.finder(**kw)
    plt.tight_layout()
    figure = plt.gcf()
    if '.gif' in filename:
        # Prefer pillow; fall back to imagemagick when unavailable.
        try:
            writer = ani.writers['pillow'](fps=fps)
        except (RuntimeError, KeyError):
            writer = ani.writers['imagemagick'](fps=fps)
        except:
            raise RuntimeError('This python seems unable to make an animated gif.')
    else:
        try:
            writer = ani.writers['ffmpeg'](fps=fps)
        except (RuntimeError, KeyError):
            raise RuntimeError('This computer seems unable to ffmpeg.')
    with writer.saving(figure, filename, dpi or figure.get_dpi()):
        for epoch in tqdm(np.arange(epochs[0], epochs[1] + dt, dt)):
            # update the illustration to a new time
            coords = self.atEpoch(epoch)
            scatter.set_offsets(list(zip(coords.ra.value, coords.dec.value)))
            plt.title('{} in {:.1f}'.format(self.name, epoch))
            writer.grab_frame()
def enable(self, key_id, **kwargs):
    """Enable a deploy key for a project.

    Args:
        key_id (int): The ID of the key to enable
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabProjectDeployKeyError: If the key could not be enabled
    """
    endpoint = '%s/%s/enable' % (self.path, key_id)
    self.gitlab.http_post(endpoint, **kwargs)
def set_last_col_idx(self, last_col_idx):
    '''Extend the tracked maximum column index.

    :param last_col_idx: int, new number of columns; must not be smaller
        than the current maximum column index.
    :returns: self, to allow call chaining.
    :raises ValueError: if ``last_col_idx`` would shrink the column range.
    '''
    # Raise instead of assert: assert statements are stripped under
    # ``python -O``, which would silently allow the range to shrink.
    if last_col_idx < self._max_col:
        raise ValueError(
            'last_col_idx (%r) must be >= current max column (%r)'
            % (last_col_idx, self._max_col))
    self._max_col = last_col_idx
    return self
def _prepare_env ( self ) : # pragma : no cover
"""Setup the document ' s environment , if necessary .""" | env = self . state . document . settings . env
if not hasattr ( env , self . directive_name ) : # Track places where we use this directive , so we can check for
# outdated documents in the future .
state = DirectiveState ( )
setattr ( env , self . directive_name , state )
else :
state = getattr ( env , self . directive_name )
return env , state |
def double_click(self, on_element=None):
    """Double-clicks an element.

    :Args:
     - on_element: The element to double-click.
       If None, clicks on current mouse position.

    :returns: self, so actions can be chained.
    """
    if on_element:
        self.move_to_element(on_element)
    if self._driver.w3c:
        self.w3c_actions.pointer_action.double_click()
        # Pad the key-action channel with pauses, presumably to keep the
        # pointer and key action sequences the same length -- TODO confirm
        # against the w3c_actions implementation.
        for _ in range(4):
            self.w3c_actions.key_action.pause()
    else:
        # Legacy (non-W3C) protocol: queue the raw DOUBLE_CLICK command.
        self._actions.append(lambda: self._driver.execute(Command.DOUBLE_CLICK, {}))
    return self
def _handle_load(cls, handler, file_object, validate=False, **kwargs):
    """Loads caller, used by partial method for dynamic handler assignments.

    :param object handler: The loads handler
    :param file file_object: The file object to load from
    :param bool validate: Performs content validation before loading,
        defaults to False, optional
    :return: The loaded instance
    :rtype: object
    """
    raw = handler.load(cls, file_object, **kwargs)
    return from_dict(cls, raw, validate=validate)
def place_items_in_square(items, t):
    """Place ``items`` into the rows of a ``t`` x ``t`` square.

    Each item's cell is ``(item % t, item // t)``.  The result is a list
    usable with the heapq functions whose entries are
    ``(t - len(row), y, [(x, item), ...])``; because the key is the
    *inverse* row length (heapq only offers a min-heap), popping yields
    the longest rows first.  Each row's contents are themselves a heap
    ordered by x.  Empty rows are omitted.

    >>> place_items_in_square([1,5,7], 4)
    [(2, 1, [(1, 5), (3, 7)]), (3, 0, [(1, 1)])]
    >>> place_items_in_square([1,5,7], 3)
    [(2, 0, [(1, 1)]), (2, 1, [(2, 5)]), (2, 2, [(1, 7)])]
    """
    # Bucket the items by row first; the heap tuples are built afterwards.
    cells_by_row = [[] for _ in range(t)]
    for item in items:
        # Cell coordinates within the square.
        x, y = item % t, item // t
        heapq.heappush(cells_by_row[y], (x, item))
    rows = [(t - len(contents), y, contents)
            for y, contents in enumerate(cells_by_row)]
    heapq.heapify(rows)
    # Return only rows that are populated.
    return [row for row in rows if row[2]]
def is_sqlatype_date(coltype: TypeEngine) -> bool:
    """Is the SQLAlchemy column type a date type?"""
    coltype = _coltype_to_typeengine(coltype)
    # sqltypes._DateAffinity stopped working for this check in
    # SQLAlchemy 1.2.11, so test the concrete date/time types directly.
    return isinstance(coltype, (sqltypes.DateTime, sqltypes.Date))
def union_categoricals(to_union, sort_categories=False, ignore_order=False):
    """Combine list-like of Categorical-like, unioning categories.

    All categories must have the same dtype.

    .. versionadded:: 0.19.0

    Parameters
    ----------
    to_union : list-like of Categorical, CategoricalIndex,
        or Series with dtype='category'
    sort_categories : boolean, default False
        If true, resulting categories will be lexsorted, otherwise
        they will be ordered as they appear in the data.
    ignore_order : boolean, default False
        If true, the ordered attribute of the Categoricals will be ignored.
        Results in an unordered categorical.

        .. versionadded:: 0.20.0

    Returns
    -------
    result : Categorical

    Raises
    ------
    TypeError
        - all inputs do not have the same dtype
        - all inputs do not have the same ordered property
        - all inputs are ordered and their categories are not identical
        - sort_categories=True and Categoricals are ordered
    ValueError
        Empty list of categoricals passed

    Notes
    -----
    To learn more about categories, see `link
    <http://pandas.pydata.org/pandas-docs/stable/categorical.html#unioning>`__

    Examples
    --------
    >>> from pandas.api.types import union_categoricals
    >>> a = pd.Categorical(["b", "c"])
    >>> b = pd.Categorical(["a", "b"])
    >>> union_categoricals([a, b])
    [b, c, a, b]
    Categories (3, object): [b, c, a]

    By default the resulting categories are ordered as they appear in the
    data; pass ``sort_categories=True`` to lexsort them instead.

    >>> union_categoricals([a, b], sort_categories=True)
    [b, c, a, b]
    Categories (3, object): [a, b, c]

    Ordered categoricals can only be combined when their categories are
    identical; otherwise a TypeError is raised unless ``ignore_order=True``
    is given, in which case the result is unordered.  CategoricalIndex and
    Series inputs are accepted, but the result is always a plain
    Categorical.
    """
    # Local imports to avoid circular imports at module load time.
    from pandas import Index, Categorical, CategoricalIndex, Series
    from pandas.core.arrays.categorical import _recode_for_categories
    if len(to_union) == 0:
        raise ValueError('No Categoricals to union')

    def _maybe_unwrap(x):
        # Operate on the raw Categorical underlying an index/series wrapper.
        if isinstance(x, (CategoricalIndex, Series)):
            return x.values
        elif isinstance(x, Categorical):
            return x
        else:
            raise TypeError("all components to combine must be Categorical")

    to_union = [_maybe_unwrap(x) for x in to_union]
    first = to_union[0]

    if not all(is_dtype_equal(other.categories.dtype, first.categories.dtype)
               for other in to_union[1:]):
        raise TypeError("dtype of categories must be the same")

    ordered = False
    if all(first.is_dtype_equal(other) for other in to_union[1:]):
        # identical categories - fastpath
        categories = first.categories
        ordered = first.ordered

        if all(first.categories.equals(other.categories)
               for other in to_union[1:]):
            # Same categories in the same order: concatenate codes as-is.
            new_codes = np.concatenate([c.codes for c in to_union])
        else:
            # Same dtype but categories differ in order: recode every
            # other input onto the first input's categories.
            codes = [first.codes] + [_recode_for_categories(
                other.codes, other.categories, first.categories)
                for other in to_union[1:]]
            new_codes = np.concatenate(codes)

        if sort_categories and not ignore_order and ordered:
            raise TypeError("Cannot use sort_categories=True with "
                            "ordered Categoricals")

        if sort_categories and not categories.is_monotonic_increasing:
            categories = categories.sort_values()
            indexer = categories.get_indexer(first.categories)

            from pandas.core.algorithms import take_1d
            # Remap codes through the sorted category order; -1 stays NaN.
            new_codes = take_1d(indexer, new_codes, fill_value=-1)
    elif ignore_order or all(not c.ordered for c in to_union):
        # different categories - union and recode
        cats = first.categories.append([c.categories for c in to_union[1:]])
        categories = Index(cats.unique())
        if sort_categories:
            categories = categories.sort_values()

        new_codes = [_recode_for_categories(c.codes, c.categories, categories)
                     for c in to_union]
        new_codes = np.concatenate(new_codes)
    else:
        # ordered - to show a proper error message
        if all(c.ordered for c in to_union):
            msg = ("to union ordered Categoricals, "
                   "all categories must be the same")
            raise TypeError(msg)
        else:
            raise TypeError('Categorical.ordered must be the same')

    if ignore_order:
        ordered = False

    return Categorical(new_codes, categories=categories, ordered=ordered,
                       fastpath=True)
def ParseReportDescriptor(rd, desc):
    """Parse the binary report descriptor.

    Parse the binary report descriptor into a DeviceDescriptor object.

    Args:
      rd: The binary report descriptor
      desc: The DeviceDescriptor object to update with the results
          from parsing the descriptor.

    Returns:
      The updated descriptor object (the object is mutated in place and
      also returned).
    """
    rd = bytearray(rd)

    pos = 0
    # Running parser state: set by REPORT_COUNT / REPORT_SIZE /
    # USAGE_PAGE / USAGE items and consumed by the INPUT / OUTPUT /
    # COLLECTION items that follow.
    report_count = None
    report_size = None
    usage_page = None
    usage = None

    while pos < len(rd):
        key = rd[pos]

        # First step, determine the value encoding (either long or short).
        key_size, value_length = GetValueLength(rd, pos)

        if key & REPORT_DESCRIPTOR_KEY_MASK == INPUT_ITEM:
            if report_count and report_size:
                # Sizes are in bits; track the largest input report in bytes.
                byte_length = (report_count * report_size) // 8
                desc.internal_max_in_report_len = max(
                    desc.internal_max_in_report_len, byte_length)
                report_count = None
                report_size = None
        elif key & REPORT_DESCRIPTOR_KEY_MASK == OUTPUT_ITEM:
            if report_count and report_size:
                # As above, for output reports.
                byte_length = (report_count * report_size) // 8
                desc.internal_max_out_report_len = max(
                    desc.internal_max_out_report_len, byte_length)
                report_count = None
                report_size = None
        elif key & REPORT_DESCRIPTOR_KEY_MASK == COLLECTION_ITEM:
            # NOTE(review): only truthy (non-zero) values are recorded --
            # confirm zero is never a valid usage page / usage here.
            if usage_page:
                desc.usage_page = usage_page
            if usage:
                desc.usage = usage
        elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_COUNT:
            # Bounds check before reading the little-endian value bytes.
            if len(rd) >= pos + 1 + value_length:
                report_count = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == REPORT_SIZE:
            if len(rd) >= pos + 1 + value_length:
                report_size = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE_PAGE:
            if len(rd) >= pos + 1 + value_length:
                usage_page = ReadLsbBytes(rd, pos + 1, value_length)
        elif key & REPORT_DESCRIPTOR_KEY_MASK == USAGE:
            if len(rd) >= pos + 1 + value_length:
                usage = ReadLsbBytes(rd, pos + 1, value_length)

        pos += value_length + key_size
    return desc
def add_log_type(name, display, color, bcolor):
    """Register a new log type on the LOG object.

    name: call name (A-Z and '_')
    display: display message in [-]
    color: text color (see bashutils.colors)
    bcolor: background color (see bashutils.colors)
    """
    global MESSAGE_LOG
    attr_name = name.replace(" ", "_").upper()
    # Pick the smallest integer id not already registered.
    used = MESSAGE_LOG.keys()
    next_id = 0
    while next_id in used:
        next_id += 1
    MESSAGE_LOG[next_id] = [attr_name, (display, color, bcolor,)]
    setattr(LOG, attr_name, next_id)
def format_error(status=None, title=None, detail=None, code=None):
    '''Formatting JSON API Error Object

    Constructing an error object based on JSON API standard
    ref: http://jsonapi.org/format/#error-objects

    Args:
        status: Can be a http status codes
        title: A summary of error
        detail: A descriptive error message
        code: Application error codes (if any)

    Returns:
        A dict with 'title' always present and 'status', 'detail' and
        'code' included only when they were supplied.
    '''
    # 'title' is always emitted (even as None), matching the original
    # behaviour; the remaining fields are optional.
    error = {'title': title}
    for field, value in (('status', status), ('detail', detail), ('code', code)):
        if value is not None:
            error[field] = value
    return error
def is_none_or(self):
    """Ensures :attr:`subject` is either ``None``, or satisfies subsequent (chained) conditions::

        Ensure(None).is_none_or.is_an(int)
    """
    if self._subject is not None:
        return self
    # None subject: every further chained check becomes a no-op.
    return NoOpInspector(subject=self._subject, error_factory=self._error_factory)
def get_dilation_rates(hparams, width):
    """Get a list of valid dilation rates.

    Args:
        hparams: HParams.
        width: spatial dimension. Ensures that the effective filter size is
            not larger than the spatial dimension.

    Returns:
        allowed_dilations: A list of 5-element dilation-rate lists;
        ``[1]*5`` (no dilation) is always included.
    """
    allowed_dilations = [[1] * 5]
    apply_dilations = hparams.get("latent_apply_dilations", False)
    rates = hparams.get("latent_dilation_rates", [1, 3])
    if apply_dilations:
        for rate in rates:
            # Effective filter size is k + (k - 1) * rate, with k
            # hardcoded to 3 everywhere.
            effective_size = 3 + 2 * rate
            if effective_size <= width:
                allowed_dilations.append([1, 1, rate + 1, rate + 1, 1])
    return allowed_dilations
def convert_depthtospace(node, **kwargs):
    """Map MXNet's depth_to_space operator attributes to onnx's
    DepthToSpace operator and return the created node."""
    name, input_nodes, attrs = get_inputs(node, kwargs)
    block_size = int(attrs.get("block_size", 0))
    onnx_node = onnx.helper.make_node(
        "DepthToSpace",
        input_nodes,
        [name],
        blocksize=block_size,
        name=name,
    )
    return [onnx_node]
def secret_finder(self):
    """Parse ``self.secret_file`` for the consumer key and secret.

    The file must contain the consumer key on the first line and the
    consumer secret on the second.  Sets ``self.consumer_key`` and
    ``self.consumer_secret``; exits the program if the file is missing.
    """
    if not os.path.isfile(self.secret_file):
        # Message fixed: the original began with a stray '"' and
        # misspelled "on the second line".
        print('Cannot find the secret.txt file required for authorization. '
              'Please ensure that this file exists, and that the supplied consumer key is on the '
              'first line, and the consumer secret is on the second line. '
              'Contact keith.jolley@zoo.ox.ac.uk for an account, and the necessary keys')
        # Equivalent to quit(): raises SystemExit, but does not depend on
        # the site module being loaded.
        raise SystemExit
    # Open the file, and put the contents into a list.
    with open(self.secret_file, 'r') as secret:
        secretlist = [line.rstrip() for line in secret]
    # Extract the key and secret from the list.
    self.consumer_key = secretlist[0]
    self.consumer_secret = secretlist[1]
def to_pb(self):
    """Converts the garbage collection rule to a protobuf.

    :rtype: :class:`.table_v2_pb2.GcRule`
    :returns: The converted current object.
    """
    duration = _helpers._timedelta_to_duration_pb(self.max_age)
    return table_v2_pb2.GcRule(max_age=duration)
def labelForAction(self, action):
    """Returns the label that contains the inputed action.

    :return     <XDockActionLabel> || None
    """
    return next(
        (label for label in self.actionLabels() if label.action() == action),
        None,
    )
def axis_angle(self):
    """:obj:`numpy.ndarray` of float: The axis-angle representation for the rotation.

    Returns ``theta * omega`` where ``theta`` is the rotation angle in
    radians and ``omega`` the unit rotation axis.  For the identity
    rotation the zero vector is returned.
    """
    qw, qx, qy, qz = self.quaternion
    # Clip guards against tiny numerical overshoot (|qw| slightly > 1)
    # which would make arccos return NaN.
    theta = 2 * np.arccos(np.clip(qw, -1.0, 1.0))
    # sin(theta/2); hoisted instead of recomputing sqrt(1 - qw^2) per axis.
    sin_half = np.sqrt(max(1.0 - qw ** 2, 0.0))
    if theta > 0 and sin_half > 1e-12:
        omega = np.array([qx, qy, qz]) / sin_half
    else:
        # theta == 0 (identity) or qw == -1 (sin_half == 0, where the
        # original divided by zero): fall back to a fixed axis.
        omega = np.array([1, 0, 0])
    return theta * omega
def write_data(data):
    """Write the data to the data.json file.

    Args:
        data (dict): The updated data dictionary for Modis
    """
    # Keys are sorted recursively so the on-disk file is deterministic.
    with open(_datafile, 'w') as handle:
        _json.dump(sort_recursive(data), handle, indent=2)
def _get_ids_from_name_public ( self , name ) :
"""Get public images which match the given name .""" | results = self . list_public_images ( name = name )
return [ result [ 'id' ] for result in results ] |
def list_nodes():
    '''List virtual machines

    .. code-block:: bash

        salt-cloud -Q
    '''
    session = _get_session()
    vms = session.xenapi.VM.get_all_records()
    ret = {}
    for vm in vms:
        record = session.xenapi.VM.get_record(vm)
        # Skip templates and the Xen control domain (dom0); only real
        # guest VMs are listed.
        if not record['is_a_template'] and not record['is_control_domain']:
            try:
                base_template_name = record['other_config']['base_template_name']
            except Exception:
                # Not every VM records its base template; treat as unknown.
                base_template_name = None
                log.debug('VM %s, doesnt have base_template_name attribute',
                          record['name_label'])
            ret[record['name_label']] = {
                'id': record['uuid'],
                'image': base_template_name,
                'name': record['name_label'],
                'size': record['memory_dynamic_max'],
                'state': record['power_state'],
                'private_ips': get_vm_ip(record['name_label'], session),
                'public_ips': None}
    return ret
def _startSchedulesNode ( self , name , attrs ) :
"""Process the start of a node under xtvd / schedules""" | if name == 'schedule' :
self . _programId = attrs . get ( 'program' )
self . _stationId = attrs . get ( 'station' )
self . _time = self . _parseDateTime ( attrs . get ( 'time' ) )
self . _duration = self . _parseDuration ( attrs . get ( 'duration' ) )
self . _new = attrs . has_key ( 'new' )
self . _stereo = attrs . has_key ( 'stereo' )
self . _subtitled = attrs . has_key ( 'subtitled' )
self . _hdtv = attrs . has_key ( 'hdtv' )
self . _closeCaptioned = attrs . has_key ( 'closeCaptioned' )
self . _ei = attrs . has_key ( 'ei' )
self . _tvRating = attrs . get ( 'tvRating' )
self . _dolby = attrs . get ( 'dolby' )
self . _partNumber = None
self . _partTotal = None
elif name == 'part' :
self . _partNumber = attrs . get ( 'number' )
self . _partTotal = attrs . get ( 'total' ) |
def get_spatial_type(spatial_model):
    """Translate a spatial model string to a spatial type."""
    if spatial_model in ('SkyDirFunction', 'PointSource', 'Gaussian'):
        return 'SkyDirFunction'
    if spatial_model == 'SpatialMap':
        return 'SpatialMap'
    if spatial_model in ('RadialGaussian', 'RadialDisk'):
        # Radial models need pyLikelihood support; without it, fall back
        # to representing them as a SpatialMap.
        try:
            import pyLikelihood
            if hasattr(pyLikelihood, 'RadialGaussian'):
                return spatial_model
            return 'SpatialMap'
        except Exception:
            return spatial_model
    return spatial_model
def plot_dendrogram(ax, obj, show_diameters=True):
    '''Dendrogram of `obj`

    Args:
        ax : matplotlib axes to draw onto.
        obj : Neuron or tree (neurom.Neuron, neurom.Tree).
        show_diameters : boolean
            Determines if node diameters will be shown or not.
    '''
    # create dendrogram and generate rectangle collection
    dnd = Dendrogram(obj, show_diameters=show_diameters)
    dnd.generate()
    # render dendrogram and take into account neurite displacement which
    # starts as zero. It is important to avoid overlapping of neurites
    # and to determine the limits of the figure.
    _render_dendrogram(dnd, ax, 0.)
    ax.set_title('Morphology Dendrogram')
    ax.set_xlabel('micrometers (um)')
    ax.set_ylabel('micrometers (um)')
    ax.set_aspect('auto')
    ax.legend()
def append(self, key, _item):  # type: (Union[Key, str], Any) -> InlineTable
    """Appends a (key, item) to the table.

    Non-Item values are first wrapped via item().  Because an inline
    table is rendered on a single line, items are separated with a
    single space and may not carry comments.  Returns self for chaining.
    """
    if not isinstance(_item, Item):
        _item = item(_item)
    if not isinstance(_item, (Whitespace, Comment)):
        if not _item.trivia.indent and len(self._value) > 0:
            # Separate from the previous entry with a single space.
            _item.trivia.indent = " "
        if _item.trivia.comment:
            # Inline tables cannot contain comments; drop it.
            _item.trivia.comment = ""
    self._value.append(key, _item)
    if isinstance(key, Key):
        key = key.key
    if key is not None:
        # Keep the dict view in sync with the container value.
        super(InlineTable, self).__setitem__(key, _item)
    return self
def cancel(self, consumer_tag=None):
    """Cancels the current consuming action by using the stored consumer_tag. If a consumer_tag is given, that one is used instead.

    Parameters
    ----------
    consumer_tag : string
        Tag of consumer to cancel
    """
    _missing = object()
    tag = consumer_tag
    if not tag:
        tag = getattr(self, "consumer_tag", _missing)
        if tag is _missing:
            # Nothing is being consumed; nothing to cancel.
            return
    self.channel.basic_cancel(tag)
def read_logodata(handle):
    """Get weblogo data for a sequence alignment.

    Returns a list of tuples: (posn, letter_counts, entropy, weight)
    """
    seqs = weblogolib.read_seq_data(handle, alphabet=unambiguous_protein_alphabet)
    ldata = weblogolib.LogoData.from_seqs(seqs)
    letters = ldata.alphabet.letters()
    logodata = []
    # Positions are reported 1-based.
    for position, (coldata, entropy, weight) in enumerate(
            zip(ldata.counts.array, ldata.entropy, ldata.weight), start=1):
        counts = dict((letter, int(count))
                      for letter, count in zip(letters, coldata))
        logodata.append((position, counts, entropy, weight))
    return logodata
async def service_observable(self, limit) -> int:
    """Service the observable's inBox and outBox.

    :return: the number of messages successfully serviced
    """
    if not self.isReady():
        return 0
    outbox_count = self._service_observable_out_box(limit)
    inbox_count = await self._observable.serviceQueues(limit)
    return outbox_count + inbox_count
def set_instance(self, thing: type, value, overwrite=False):
    """Set an instance of a thing."""
    already_known = thing in self.instances
    if already_known and not overwrite:
        raise DiayException('instance for %r already exists' % thing)
    self.instances[thing] = value
def factory(resp):
    """Return a ResponseError subclass based on the API payload.

    All errors are documented: https://docs.mollie.com/guides/handling-errors#all-possible-status-codes
    More exceptions should be added here when appropriate, and when useful examples of API errors are available.
    """
    status = resp['status']
    if status == 401:
        return UnauthorizedError(resp)
    if status == 404:
        return NotFoundError(resp)
    if status == 422:
        return UnprocessableEntityError(resp)
    # generic fallback
    return ResponseError(resp)
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'begin' ) and self . begin is not None :
_dict [ 'begin' ] = self . begin
if hasattr ( self , 'end' ) and self . end is not None :
_dict [ 'end' ] = self . end
return _dict |
def data(self, index, role=QtCore.Qt.UserRole):
    """Used by the view to determine data to present

    See :qtdoc:`QAbstractItemModel<QAbstractItemModel.data>`,
    and :qtdoc:`subclassing<qabstractitemmodel.subclassing>`
    """
    if role == QtCore.Qt.DisplayRole:
        row = index.row()
        field = self._headers[index.column()]
        val = self.model.paramValue(row, field)
        if 1 <= index.column() <= 3:
            # standard units for data, not necessary current for UI
            # view will scale and convert appropriately
            unit = self.model.getDetail(index.row(), 'unit')
            if val is not None and unit is not None:
                return str(val) + ' ' + unit
            else:
                return val
        else:
            return val
    elif role == QtCore.Qt.EditRole:
        # Raw (unit-less) value for in-place editing.
        row = index.row()
        field = self._headers[index.column()]
        return self.model.paramValue(row, field)
        # return self.model.paramValue(row, field)
    elif role == QtCore.Qt.ForegroundRole:
        # color the text red for bad values
        if not self.checkValidCell(index):
            return QtGui.QBrush(ERRCELL)
    elif role == QtCore.Qt.FontRole:
        # also embolden bad values
        if not self.checkValidCell(index):
            f = QtGui.QFont()
            f.setWeight(QtGui.QFont.Bold)
            return f
    elif role == QtCore.Qt.UserRole or role == AbstractDragView.DragRole:
        # return the whole python object
        param = self.model.param(index.row())
        for comp in param['selection']:
            comp.clean()
        return param
    elif role == self.SelectionModelRole:
        # may need to translate to QModelIndexes
        return self.model.selection(self.model.param(index.row()))
    elif role == CursorRole:
        # Cursor shape depends on which column the pointer is over.
        col = index.column()
        if not index.isValid():
            return QtGui.QCursor(QtCore.Qt.ArrowCursor)
        elif col == 0:
            return cursors.pointyHand()
        elif col < 4:
            return cursors.handEdit()
        else:
            return cursors.openHand()
def on_message(self, unused_channel, basic_deliver, properties, body):
    """Called on receipt of a message from a queue.

    Processes the message using the self._process method or function and
    positively acknowledges the queue if successful. If processing is not
    successful, the message can either be rejected, quarantined or
    negatively acknowledged, depending on the failure mode.

    :param basic_deliver: AMQP basic.deliver method
    :param properties: Message properties
    :param body: Message body
    :returns: None
    """
    if self.check_tx_id:
        try:
            tx_id = self.tx_id(properties)
            logger.info('Received message', queue=self._queue,
                        delivery_tag=basic_deliver.delivery_tag,
                        app_id=properties.app_id, tx_id=tx_id)
        except KeyError as e:
            # Headers present but no tx_id: unrecoverable, reject outright.
            self.reject_message(basic_deliver.delivery_tag)
            logger.error("Bad message properties - no tx_id",
                         action="rejected", exception=str(e))
            return None
        except TypeError as e:
            # No headers at all: equally unrecoverable.
            self.reject_message(basic_deliver.delivery_tag)
            logger.error("Bad message properties - no headers",
                         action="rejected", exception=str(e))
            return None
    else:
        logger.debug("check_tx_id is False. Not checking tx_id for message.",
                     delivery_tag=basic_deliver.delivery_tag)
        tx_id = None
    try:
        try:
            self.process(body.decode("utf-8"), tx_id)
        except TypeError:
            # process() was called with the wrong signature; treat as
            # quarantinable rather than retrying forever.
            logger.error('Incorrect call to process method')
            raise QuarantinableError
        self.acknowledge_message(basic_deliver.delivery_tag, tx_id=tx_id)
    except (QuarantinableError, BadMessageError) as e:
        # Throw it into the quarantine queue to be dealt with
        try:
            self.quarantine_publisher.publish_message(body, headers={'tx_id': tx_id})
            self.reject_message(basic_deliver.delivery_tag, tx_id=tx_id)
            logger.error("Quarantinable error occured", action="quarantined",
                         exception=str(e), tx_id=tx_id)
        except PublishMessageError:
            # Quarantine publish failed: requeue so the message isn't lost.
            logger.error("Unable to publish message to quarantine queue. Rejecting message and requeuing.")
            self.reject_message(basic_deliver.delivery_tag, requeue=True, tx_id=tx_id)
    except RetryableError as e:
        # Transient failure: nack so the broker can redeliver.
        self.nack_message(basic_deliver.delivery_tag, tx_id=tx_id)
        logger.error("Failed to process", action="nack", exception=str(e), tx_id=tx_id)
    except Exception as e:
        # Unknown failure: nack, and log the full traceback.
        self.nack_message(basic_deliver.delivery_tag, tx_id=tx_id)
        logger.exception("Unexpected exception occurred")
        logger.error("Failed to process", action="nack", exception=str(e), tx_id=tx_id)
def get_feed_posts(self) -> Iterator[Post]:
    """Get Posts of the user's feed.

    :return: Iterator over Posts of the user's feed.
    """
    # The hash is presumably the query id of the feed-timeline GraphQL
    # query -- opaque, taken from the web client.
    data = self.context.graphql_query("d6f4427fbe92d846298cf93df0b937d3", {})["data"]
    while True:
        feed = data["user"]["edge_web_feed_timeline"]
        # Skip "suggested user" units, which are not real posts.
        yield from (Post(self.context, edge["node"]) for edge in feed["edges"]
                    if not edge["node"]["__typename"] == "GraphSuggestedUserFeedUnit")
        if not feed["page_info"]["has_next_page"]:
            break
        # Page through the feed using the end cursor of the previous page.
        data = self.context.graphql_query(
            "d6f4427fbe92d846298cf93df0b937d3",
            {'fetch_media_item_count': 12,
             'fetch_media_item_cursor': feed["page_info"]["end_cursor"],
             'fetch_comment_count': 4,
             'fetch_like': 10,
             'has_stories': False})["data"]
def get_recursive_subclasses(cls):
    """Return list of all subclasses for a class, including subclasses of direct subclasses."""
    direct = cls.__subclasses__()
    nested = []
    for sub in direct:
        nested.extend(get_recursive_subclasses(sub))
    return direct + nested
def _print_percent ( self ) :
'''Print how much is done in percentage .''' | fraction_done = ( ( self . continue_value or 0 + self . current_value ) / self . max_value )
self . _print ( '{fraction_done:.1%}' . format ( fraction_done = fraction_done ) ) |
def status(self, obj):
    """Get the wifi interface status."""
    reply = self._send_cmd_to_wpas(obj['name'], 'STATUS', True)
    state = ''
    # The last wpa_state= line in the reply wins (as in the original).
    for line in reply.split('\n'):
        if line.startswith('wpa_state='):
            state = line[len('wpa_state='):]
    return status_dict[state.lower()]
def push_subscription_decrypt_push(self, data, decrypt_params, encryption_header, crypto_key_header):
    """Decrypts `data` received in a webpush request. Requires the private key dict
    from `push_subscription_generate_keys()`_ (`decrypt_params`) as well as the
    Encryption and server Crypto-Key headers from the received webpush.

    Returns the decoded webpush as a `push notification dict`_.
    """
    # Pull the base64url-encoded parameters out of the two headers:
    # salt from Encryption, dh and p256ecdsa from Crypto-Key.
    salt = self.__decode_webpush_b64(encryption_header.split("salt=")[1].strip())
    dhparams = self.__decode_webpush_b64(crypto_key_header.split("dh=")[1].split(";")[0].strip())
    p256ecdsa = self.__decode_webpush_b64(crypto_key_header.split("p256ecdsa=")[1].strip())
    # Rebuild our EC private key from the stored private scalar.
    dec_key = ec.derive_private_key(decrypt_params['privkey'], ec.SECP256R1(), default_backend())
    # "aesgcm" selects the legacy webpush content encoding.
    decrypted = http_ece.decrypt(
        data,
        salt=salt,
        key=p256ecdsa,
        private_key=dec_key,
        dh=dhparams,
        auth_secret=decrypt_params['auth'],
        keylabel="P-256",
        version="aesgcm")
    return json.loads(decrypted.decode('utf-8'), object_hook=Mastodon.__json_hooks)
def count_single_dots(self):
    """Count all strokes of this recording that consist of a single dot
    (i.e. exactly one point)."""
    return sum(1 for stroke in self.get_pointlist() if len(stroke) == 1)
def _lua_to_python(lval, return_status=False):
    """Convert Lua object(s) into Python object(s), as at times Lua object(s)
    are not compatible with Python functions.

    :param lval: value returned by the embedded Lua interpreter
    :param return_status: if True, treat a table carrying an 'ok'/'err'
        field as a redis status reply: return the 'ok' value directly, or
        raise ResponseError for 'err'
    :raises RuntimeError: for an unrecognized Lua type

    NOTE(review): this function targets Python 2 -- it uses the ``long``
    builtin and will fail on Python 3.
    """
    import lua
    lua_globals = lua.globals()
    if lval is None:
        # Lua None --> Python None
        return None
    if lua_globals.type(lval) == "table":
        # Lua table --> Python list, converting each entry recursively
        pval = []
        for i in lval:
            if return_status:
                if i == 'ok':
                    return lval[i]
                if i == 'err':
                    raise ResponseError(lval[i])
            pval.append(Script._lua_to_python(lval[i]))
        return pval
    elif isinstance(lval, long):
        # Lua number --> Python long
        return long(lval)
    elif isinstance(lval, float):
        # Lua number --> Python float
        return float(lval)
    elif lua_globals.type(lval) == "userdata":
        # Lua userdata --> Python string
        return str(lval)
    elif lua_globals.type(lval) == "string":
        # Lua string --> Python string
        return lval
    elif lua_globals.type(lval) == "boolean":
        # Lua boolean --> Python bool
        return bool(lval)
    raise RuntimeError("Invalid Lua type: " + str(lua_globals.type(lval)))
def _get_setter_fun(object_type,           # type: Type
                    parameter,             # type: Parameter
                    private_property_name  # type: str
                    ):
    """Utility method to find the overridden setter function for a given
    property, or generate a new one.

    :param object_type: the class being processed
    :param parameter: the constructor Parameter the property mirrors; the
        property has the same name as this parameter
    :param private_property_name: name of the backing private attribute
    :return: tuple (setter_fun, var_name) where var_name is the name of the
        setter's single value argument
    """
    # the property will have the same name as the constructor argument
    property_name = parameter.name
    overridden_setters = getmembers(object_type, _has_annotation(__SETTER_OVERRIDE_ANNOTATION, property_name))
    if len(overridden_setters) > 0:
        # -- check that we only have one
        if len(overridden_setters) > 1:
            raise DuplicateOverrideError('Setter is overridden more than once for attribute name : %s' % property_name)
        # -- use the overridden setter
        setter_fun = overridden_setters[0][1]
        try:
            # python 2: unwrap the plain function from the unbound method
            setter_fun = setter_fun.im_func
        except AttributeError:
            pass
        # -- find the parameter name and check the signature
        s = signature(setter_fun)
        # BUGFIX: compare with != rather than `is not`.  Identity comparison
        # against a str literal relies on CPython string interning and emits
        # SyntaxWarning on modern interpreters.
        p = [attribute_name for attribute_name, param in s.parameters.items() if attribute_name != 'self']
        if len(p) != 1:
            try:
                qname = setter_fun.__qualname__
            except AttributeError:
                qname = setter_fun.__name__
            raise IllegalSetterSignatureException('overridden setter must have only 1 non-self argument, found ' + '%s for function %s' % (len(s.parameters.items()) - 1, qname))
        var_name = p[0]
    else:
        # -- create the setter, equivalent of:
        #    def autoprops_generated_setter(self, <property_name>):
        #        setattr(self, private_property_name, <property_name>)
        # ** Dynamically compile a wrapper with correct argument name **
        sig = Signature(parameters=[Parameter('self', kind=Parameter.POSITIONAL_OR_KEYWORD), parameter])

        @with_signature(sig)
        def autoprops_generated_setter(self, **kwargs):
            # kwargs holds exactly one entry: the new property value
            setattr(self, private_property_name, kwargs.popitem()[1])

        setter_fun = autoprops_generated_setter
        var_name = property_name
    return setter_fun, var_name
def compute(chart):
    """Computes the behavior factors of a chart.

    :param chart: a flatlib Chart
    :return: list of [description, object-id-list] pairs
    """
    factors = []
    # Planets in House1 or Conjunct Asc
    house1 = chart.getHouse(const.HOUSE1)
    planetsHouse1 = chart.objects.getObjectsInHouse(house1)
    asc = chart.getAngle(const.ASC)
    # aspect list [0] = conjunction only
    planetsConjAsc = chart.objects.getObjectsAspecting(asc, [0])
    _set = _merge(planetsHouse1, planetsConjAsc)
    factors.append(['Planets in House1 or Conj Asc', _set])
    # Planets conjunct Moon or Mercury
    moon = chart.get(const.MOON)
    mercury = chart.get(const.MERCURY)
    planetsConjMoon = chart.objects.getObjectsAspecting(moon, [0])
    planetsConjMercury = chart.objects.getObjectsAspecting(mercury, [0])
    _set = _merge(planetsConjMoon, planetsConjMercury)
    factors.append(['Planets Conj Moon or Mercury', _set])
    # Asc ruler, included only if aspected by its disposer
    ascRulerID = essential.ruler(asc.sign)
    ascRuler = chart.getObject(ascRulerID)
    disposerID = essential.ruler(ascRuler.sign)
    disposer = chart.getObject(disposerID)
    _set = []
    if aspects.isAspecting(disposer, ascRuler, const.MAJOR_ASPECTS):
        _set = [ascRuler.id]
    factors.append(['Asc Ruler if aspected by its disposer', _set]);
    # Planets aspecting Moon or Mercury (major aspects except conjunction)
    aspMoon = chart.objects.getObjectsAspecting(moon, [60, 90, 120, 180])
    aspMercury = chart.objects.getObjectsAspecting(mercury, [60, 90, 120, 180])
    _set = _merge(aspMoon, aspMercury)
    factors.append(['Planets Asp Moon or Mercury', _set])
    return factors
def time_correlation_by_diagonalization(P, pi, obs1, obs2=None, time=1, rdl=None):
    """Calculate the time (cross-)correlation <obs1, P^time obs2>_pi.

    Raises P to the power `time` via its eigendecomposition.  The rdl tuple
    (R, D, L) with P = R D L must be given and is used for the computation.

    :param P: transition matrix (only its decomposition rdl is used)
    :param pi: stationary distribution
    :param obs1: first observable vector
    :param obs2: second observable; defaults to obs1 (autocorrelation)
    :param time: power to raise P to
    :param rdl: (R, D, L) eigendecomposition of P
    :raises ValueError: if rdl is not provided
    """
    if rdl is None:
        raise ValueError("no rdl decomposition")
    if obs2 is None:
        # BUGFIX: obs2 defaulted to None but was used unconditionally,
        # crashing in np.dot.  Default to the autocorrelation case.
        obs2 = obs1
    R, D, L = rdl
    # Raise only the eigenvalues to the requested power.
    d_times = np.diag(D) ** time
    diag_inds = np.diag_indices_from(D)
    D_time = np.zeros(D.shape, dtype=d_times.dtype)
    D_time[diag_inds] = d_times
    P_time = np.dot(np.dot(R, D_time), L)
    # multiply element-wise obs1 and pi; this is obs1' diag(pi)
    l = np.multiply(obs1, pi)
    m = np.dot(P_time, obs2)
    result = np.dot(l, m)
    return result
def builddata(self, mopt=None, data=None, pdata=None, prior=None):
    """Rebuild pdata to account for marginalization.

    :param mopt: marginalization options; when not None, pdata is corrected
        using the difference (or ratio) between the full fit function and
        the truncated one
    :param data: raw data; used to build pdata when pdata is None
    :param pdata: pre-processed data; when given, it is filtered down to
        the datatags of self.flatmodels
    :param prior: prior passed through to self.buildprior
    :raises ValueError: if neither data nor pdata is supplied
    """
    if pdata is None:
        if data is None:
            raise ValueError('no data or pdata')
        pdata = gvar.BufferDict()
        for m in self.flatmodels:
            # coarse-grain each model's data when its ncg > 1
            pdata[m.datatag] = (m.builddata(data) if m.ncg <= 1 else MultiFitter.coarse_grain(m.builddata(data), m.ncg))
    else:
        # keep only the entries belonging to our models
        npdata = gvar.BufferDict()
        for m in self.flatmodels:
            npdata[m.datatag] = pdata[m.datatag]
        pdata = npdata
    if mopt is not None:
        fitfcn = self.buildfitfcn()
        p_all = self.buildprior(prior=prior, mopt=None)
        f_all = fitfcn(p_all)
        # fcn with part we want to keep
        p_trunc = self.buildprior(prior=prior, mopt=mopt)
        f_trunc = fitfcn(p_trunc)
        # correct pdata (copy first so the caller's dict is untouched)
        pdata = gvar.BufferDict(pdata)
        if not self.ratio:
            # additive correction: shift data by (truncated - full) fcn
            for m in self.flatmodels:
                pdata[m.datatag] += f_trunc[m.datatag] - f_all[m.datatag]
        else:
            # multiplicative correction, skipping entries where the full
            # fcn's mean is zero (to avoid dividing by zero)
            for m in self.flatmodels:
                ii = (gvar.mean(f_all[m.datatag]) != 0)
                ratio = f_trunc[m.datatag][ii] / f_all[m.datatag][ii]
                pdata[m.datatag][ii] *= ratio
    return pdata
def items_iter(self, limit):
    '''Get an iterator of the 'items' in each page.  Instead of a feature
    collection from each page, the iterator yields the features.

    :param int limit: The number of 'items' to limit to (None = no limit).
    :return: iter of items in page
    '''
    def _all_items():
        # Pages are fetched lazily, one at a time, as items are consumed.
        for page in self._pages():
            body = page.get()
            for item in body[self.ITEM_KEY]:
                yield item

    items = _all_items()
    if limit is not None:
        items = itertools.islice(items, limit)
    return items
def parse(lang_sample):
    """Tally word popularity using novel extracts, etc.

    :return: (set of distinct words, mapping word -> occurrence count)
    """
    word_list = words_from_archive(lang_sample, include_dups=True)
    tally = zero_default_dict()
    for token in word_list:
        tally[token] = tally[token] + 1
    return set(word_list), tally
def send_post(self, mri, method_name, **params):
    """Abstract method to dispatch a Post to the server.

    Args:
        mri (str): The mri of the Block
        method_name (str): The name of the Method within the Block
        params: The parameters to send

    Returns:
        The return results from the server
    """
    typ, parameters = convert_to_type_tuple_value(serialize_object(params))
    uri = NTURI(typ[2])
    # Wrap the "<mri>.<method_name>" call target and its keyword arguments
    # into a pva NTURI request structure.
    uri = uri.wrap(path="%s.%s" % (mri, method_name), kws=parameters, scheme="pva")
    # timeout=None: block until the RPC completes
    value = self._ctxt.rpc(mri, uri, timeout=None)
    return convert_value_to_dict(value)
def main():
    """Main program: print filesystem info and all subvolumes for the
    directory given on the command line.

    :return: process exit status (0 on success)
    """
    args = command.parse_args()
    with btrfs.FileSystem(args.dir) as mount:
        # mount.rescanSizes()
        fInfo = mount.FS_INFO()
        pprint.pprint(fInfo)
        vols = mount.subvolumes
        # for dev in mount.devices:
        #     pprint.pprint(dev)
        for vol in vols:
            print(vol)
    return 0
def dynamic(cls, label, val_mean, val_range):
    """Creates a dynamic parameter.

    Parameters
    ----------
    label : str
        A human-readable label for the parameter.
    val_mean : float
        The mean value of the parameter.
    val_range : float
        The minimum and maximum variance from the mean allowed for the
        parameter.
    """
    return cls(label, ParameterType.DYNAMIC, (val_mean, val_range))
def validate_url(url):
    """Validates the URL.

    :param url: candidate URL or bare domain name
    :return: the URL unchanged when valid; "http://<url>" for a bare
        domain; the empty string otherwise
    """
    if validators.url(url):
        return url
    if validators.domain(url):
        return "http://{}".format(url)
    return ""
def outerproduct(a, b):
    """Return the outer product of the vectors *a* and *b*.

    The result has shape (len(a), len(b)) with result[i][j] == a[i] * b[j].
    For example outerproduct([1, 2], [3, 4]) yields [[3, 4], [6, 8]].
    """
    result = zeros((len(a), len(b)))
    for row, x in enumerate(a):
        for col, y in enumerate(b):
            result[row][col] = x * y
    return result
def str_dict_cast(dict_, include_keys=True, include_vals=True, **kwargs):
    """Converts any bytes-like items in the input dict to string-like
    values, with respect to the running python version.

    Parameters
    ----------
    dict_ : dict
        any bytes-like objects contained in the dict will be converted to
        a string
    include_keys : bool, default=True
        if True, cast keys to a string, else ignore
    include_vals : bool, default=True
        if True, cast values to a string, else ignore
    kwargs :
        encoding : str, default: 'utf-8'
            encoding to be used when decoding bytes
    """
    if include_keys:
        keys = str_list_cast(dict_.keys(), **kwargs)
    else:
        keys = dict_.keys()
    if include_vals:
        vals = str_list_cast(dict_.values(), **kwargs)
    else:
        vals = dict_.values()
    return dict(zip_(keys, vals))
def program_checks(job, input_args):
    """Checks that dependency programs are installed, then schedules the
    shared-file download job.

    job: the toil job this check runs under
    input_args: dict Dictionary of input arguments (from main())
    """
    # Program checks
    required = ('curl', 'docker', 'unzip', 'samtools')
    for program in required:
        assert which(program), 'Program "{}" must be installed on every node.'.format(program)
    job.addChildJobFn(download_shared_files, input_args)
def get_email_body(self, sample):
    """Returns the email body text for a sample-invalidation notice.

    :param sample: the invalidated sample; its retest, ids, links and the
        laboratory's print address are substituted into the setup's email
        body template
    """
    retest = sample.getRetest()
    lab_address = api.get_bika_setup().laboratory.getPrintAddress()
    setup = api.get_setup()
    # safe_substitute leaves any unknown ${placeholders} in place instead
    # of raising, so a partially-filled template still renders.
    body = Template(setup.getEmailBodySampleInvalidation()).safe_substitute(
        dict(
            sample_link=self.get_html_link(sample),
            retest_link=self.get_html_link(retest),
            sample_id=api.get_id(sample),
            retest_id=api.get_id(retest),
            lab_address="<br/>".join(lab_address)
        )
    )
    return body
def to_df(self, recommended_only=False, include_io=True):
    """Return a pandas DataFrame for each model and dataset.

    Parameters
    ----------
    recommended_only : bool, optional
        If True, only recommended models for each session are included. If
        no model is recommended, then a row with its ID will be included,
        but all fields will be null.
    include_io : bool, optional
        If True, then the input/output files from BMDS will also be
        included, specifically the (d) input file and the out file.

    Returns
    -------
    out : pandas.DataFrame
        Data frame containing models and outputs
    """
    od = BMDS._df_ordered_dict(include_io)
    # Use a plain for-loop: _add_to_to_ordered_dict is called purely for
    # its side effect on `od`, so the original list comprehension built a
    # throwaway list and obscured the intent.
    for i, session in enumerate(self):
        session._add_to_to_ordered_dict(od, i, recommended_only)
    return pd.DataFrame(od)
def run(configobj=None):
    """TEAL interface for the `acsccd` function.

    :param configobj: TEAL configuration object providing 'input',
        'exec_path', 'time_stamps', 'verbose' and 'quiet' values.
    """
    acsccd(
        configobj['input'],
        exec_path=configobj['exec_path'],
        time_stamps=configobj['time_stamps'],
        verbose=configobj['verbose'],
        quiet=configobj['quiet']
        # dqicorr=configobj['dqicorr'],
        # atodcorr=configobj['atodcorr'],
        # blevcorr=configobj['blevcorr'],
        # biascorr=configobj['biascorr']
    )
async def check_authorized(self, identity):
    """Works like :func:`Security.identity`, but when the check fails an
    :func:`UnauthorizedError` exception is raised.

    :param identity: Claim
    :return: Checked claim
    :raise: :func:`UnauthorizedError`
    """
    checked = await self.identify(identity)
    if checked is None:
        raise UnauthorizedError()
    return checked
def vote_total(self):
    """Calculates the vote total as total_upvotes - total_downvotes.

    We add this method here instead of relying on django-secretballot's
    addition since that doesn't work for subclasses.
    """
    votes = self.modelbase_obj.votes
    upvotes = votes.filter(vote=+1).count()
    downvotes = votes.filter(vote=-1).count()
    return upvotes - downvotes
def load_file(self, path=None, just_settings=False):
    """Loads a data file.  After the file is loaded, calls
    self.after_load_file(self), which you can overwrite if you like!

    :param path: path of the file to load; forwarded to the underlying
        databox loader
    :param just_settings: if True, only load the configuration of the
        controls, and do not plot anything or run after_load_file
    """
    # if it's just the settings file, make a new throwaway databox and
    # read only the header; otherwise load into the internal databox (self)
    if just_settings:
        d = _d.databox()
        header_only = True
    else:
        d = self
        header_only = False
    # import the settings if they exist in the header
    # NOTE: the loader returns None on failure, hence this success test
    if not None == _d.databox.load_file(d, path, filters=self.file_type, header_only=header_only, quiet=just_settings):
        # loop over the autosettings and update the gui
        for x in self._autosettings_controls:
            self._load_gui_setting(x, d)
    # always sync the internal data
    self._synchronize_controls()
    # plot the data if this isn't just a settings load
    if not just_settings:
        self.plot()
        self.after_load_file()
def _records_commit(record_ids):
    """Fetch each record by id and commit it."""
    for rid in record_ids:
        Record.get_record(rid).commit()
def get_next_token(string):
    '''"Eats" up the string until it hits an ending character to get valid
    leaf expressions.

    For example, given \\Phi_{z}(L)=\\sum_{i=1}^{N}\\frac{1}{C_{i}\\times V_{\\rm max,i}},
    this function would pull out \\Phi, stopping at _

    @string: str
    returns a tuple of (expression [ex: \\Phi],
                        remaining_chars [ex: _{z}(L)=\\sum_{i=1}^{N}...])
    '''
    STOP_CHARS = "_ {}^ \n ,()="
    # ^ and _ are valid leaf expressions -- just ones that should be
    # handled on their own, so they are returned as single characters.
    if string[0] in STOP_CHARS:
        return string[0], string[1:]
    expression = []
    for i, c in enumerate(string):
        if c in STOP_CHARS:
            return "".join(expression), string[i:]
        expression.append(c)
    # BUGFIX: when no stop character exists, the whole string is the
    # expression and nothing remains.  The original returned string[i:]
    # here, wrongly leaving the final character in the remainder.
    return "".join(expression), ""
def _build_models_query(self, query):
    """Builds a query from `query` that filters to documents only from
    registered models.

    :param query: a xapian.Query to restrict
    :return: the restricted query, or `query` unchanged when no models
        are registered
    """
    registered_models_ct = self.build_models_list()
    if registered_models_ct:
        # One content-type term per registered model, OR'd together, then
        # AND'd with the original query to limit the result set.
        restrictions = [xapian.Query('%s%s' % (TERM_PREFIXES[DJANGO_CT], model_ct)) for model_ct in registered_models_ct]
        limit_query = xapian.Query(xapian.Query.OP_OR, restrictions)
        query = xapian.Query(xapian.Query.OP_AND, query, limit_query)
    return query
def get_domain_connect_template_async_context(self, domain, provider_id, service_id, redirect_uri, params=None, state=None, service_id_in_path=False):
    """Makes full Domain Connect discovery of a domain and returns full
    context to request async consent.

    :param domain: str
    :param provider_id: str
    :param service_id: str, or list of str when service_id_in_path is False
    :param redirect_uri: str
    :param params: dict
    :param state: str
    :param service_id_in_path: bool
    :return: (DomainConnectAsyncContext, str)
        asyncConsentUrl field of returned context shall be used to redirect
        the browser to; second field is an indication of error
    :raises NoDomainConnectRecordException:
        when no _domainconnect record found
    :raises NoDomainConnectSettingsException:
        when settings are not found
    :raises TemplateNotSupportedException:
        when template is not found
    :raises InvalidDomainConnectSettingsException:
        when parts of the settings are missing
    :raises DomainConnectException:
        on other domain connect issues
    """
    if params is None:
        params = {}
    config = self.get_domain_config(domain)
    # raises TemplateNotSupportedException when unsupported
    self.check_template_supported(config, provider_id, service_id)
    if config.urlAsyncUX is None:
        raise InvalidDomainConnectSettingsException("No asynch UX URL in config")
    if service_id_in_path:
        if type(service_id) is list:
            raise DomainConnectException("Multiple services are only supported with service_id_in_path=false")
        async_url_format = '{0}/v2/domainTemplates/providers/{1}/services/{2}' '?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
    else:
        if type(service_id) is list:
            # multiple services are encoded into a single "+"-joined scope
            service_id = '+'.join(service_id)
        async_url_format = '{0}/v2/domainTemplates/providers/{1}' '?client_id={1}&scope={2}&domain={3}&host={4}&{5}'
    if redirect_uri is not None:
        params["redirect_uri"] = redirect_uri
    if state is not None:
        params["state"] = state
    ret = DomainConnectAsyncContext(config, provider_id, service_id, redirect_uri, params)
    # Query parameters are sorted by key so the generated URL is stable.
    ret.asyncConsentUrl = async_url_format.format(config.urlAsyncUX, provider_id, service_id, config.domain_root, config.host, urllib.parse.urlencode(sorted(params.items(), key=lambda val: val[0])))
    return ret
def _get_identifiers(self, limit):
    """This will process the id mapping file provided by Biogrid.

    The file has a very large header, which we scan past, then pull the
    identifiers, and make equivalence axioms.

    :param limit: stop after this many mapped rows (None = process all)
    :return:
    """
    LOG.info("getting identifier mapping")
    line_counter = 0
    f = '/'.join((self.rawdir, self.files['identifiers']['file']))
    myzip = ZipFile(f, 'r')
    # assume that the first entry is the item
    fname = myzip.namelist()[0]
    foundheader = False
    # TODO align this species filter with the one above
    # speciesfilters = 'Homo sapiens,Mus musculus,Drosophila melanogaster,
    # Danio rerio,Caenorhabditis elegans,Xenopus laevis'.split(',')
    speciesfilters = 'Homo sapiens,Mus musculus'.split(',')
    with myzip.open(fname, 'r') as csvfile:
        for line in csvfile:
            # skip header lines
            if not foundheader:
                if re.match(r'BIOGRID_ID', line.decode()):
                    foundheader = True
                continue
            line = line.decode().strip()
            # Tab-separated columns:
            #   BIOGRID_ID
            #   IDENTIFIER_VALUE
            #   IDENTIFIER_TYPE
            #   ORGANISM_OFFICIAL_NAME
            # e.g. 1814566  ENTREZ_GENE  Arabidopsis thaliana
            (biogrid_num, id_num, id_type, organism_label) = line.split('\t')
            if self.test_mode:
                graph = self.testgraph
                # skip any genes that don't match our test set
                if int(biogrid_num) not in self.biogrid_ids:
                    continue
            else:
                graph = self.graph
            model = Model(graph)
            # for each one of these,
            # create the node and add equivalent classes
            biogrid_id = 'BIOGRID:' + biogrid_num
            # translate the source's identifier type into a CURIE prefix
            prefix = self.localtt[id_type]
            # TODO make these filters available as commandline options
            # geneidtypefilters = 'NCBIGene,OMIM,MGI,FlyBase,ZFIN,MGI,HGNC,
    # WormBase,XenBase,ENSEMBL,miRBase'.split(',')
            geneidtypefilters = 'NCBIGene,MGI,ENSEMBL,ZFIN,HGNC'.split(',')
            # proteinidtypefilters = 'HPRD,Swiss-Prot,NCBIProtein'
            if (speciesfilters is not None) and (organism_label.strip() in speciesfilters):
                line_counter += 1
                if (geneidtypefilters is not None) and (prefix in geneidtypefilters):
                    mapped_id = ':'.join((prefix, id_num))
                    model.addEquivalentClass(biogrid_id, mapped_id)
                # this symbol will only get attached to the biogrid class
                elif id_type == 'OFFICIAL_SYMBOL':
                    model.addClassToGraph(biogrid_id, id_num)
                # elif (id_type == 'SYNONYM'):
                #     FIXME - i am not sure these are synonyms, altids?
                #     gu.addSynonym(g, biogrid_id, id_num)
            if not self.test_mode and limit is not None and line_counter > limit:
                break
    myzip.close()
    return
def fetch_plos_images(article_doi, output_dir, document):
    """Fetch the images for a PLoS article from the internet.

    PLoS images are known through the inspection of <graphic> and
    <inline-graphic> elements. The information in these tags is then parsed
    into appropriate URLs for downloading.

    :param article_doi: article DOI; the token after the first '.' selects
        the subjournal base URL
    :param output_dir: directory where downloaded .png files are written
    :param document: parsed article whose XML tree is inspected
    :return: True on success, False when a download ultimately fails
    """
    log.info('Processing images for {0}...'.format(article_doi))
    # A dict of URLs for PLoS subjournals
    journal_urls = {'pgen': 'http://www.plosgenetics.org/article/{0}',
                    'pcbi': 'http://www.ploscompbiol.org/article/{0}',
                    'ppat': 'http://www.plospathogens.org/article/{0}',
                    'pntd': 'http://www.plosntds.org/article/{0}',
                    'pmed': 'http://www.plosmedicine.org/article/{0}',
                    'pbio': 'http://www.plosbiology.org/article/{0}',
                    'pone': 'http://www.plosone.org/article/{0}',
                    'pctr': 'http://clinicaltrials.ploshubs.org/article/{0}'}
    # Identify subjournal name for base URL
    subjournal_name = article_doi.split('.')[1]
    base_url = journal_urls[subjournal_name]
    # Acquire <graphic> and <inline-graphic> xml elements
    graphics = document.document.getroot().findall('.//graphic')
    graphics += document.document.getroot().findall('.//inline-graphic')
    # Begin to download
    log.info('Downloading images, this may take some time...')
    for graphic in graphics:
        nsmap = document.document.getroot().nsmap
        xlink_href = graphic.attrib['{' + nsmap['xlink'] + '}' + 'href']
        # Equations are handled a bit differently than the others.
        # Here we decide that an image name starting with "e" is an equation
        if xlink_href.split('.')[-1].startswith('e'):
            resource = 'fetchObject.action?uri=' + xlink_href + '&representation=PNG'
        else:
            resource = xlink_href + '/largerimage'
        full_url = base_url.format(resource)
        try:
            image = urllib.request.urlopen(full_url)
        except urllib.error.HTTPError as e:
            if e.code == 503:  # Server overload error
                time.sleep(1)  # Wait a second
                try:
                    image = urllib.request.urlopen(full_url)
                except Exception:
                    # BUGFIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit.
                    return False  # Happened twice, give up
            else:
                log.error('urllib.error.HTTPError {0}'.format(e.code))
                return False
        # BUGFIX: the save step used to live in the try's `else:` clause,
        # so an image fetched by a successful 503 retry was downloaded but
        # never written to disk.  Saving now happens for every successful
        # fetch.
        img_name = xlink_href.split('.')[-1] + '.png'
        img_path = os.path.join(output_dir, img_name)
        with open(img_path, 'wb') as output:
            output.write(image.read())
        log.info('Downloaded image {0}'.format(img_name))
    log.info('Done downloading images')
    return True
def _convert_sam_function_resource(name, resource_properties, layers):
    """Converts a AWS::Serverless::Function resource to a Function
    configuration usable by the provider.

    :param string name: LogicalID of the resource NOTE: This is *not* the
        function name because not all functions declare a name
    :param dict resource_properties: Properties of this resource
    :param list layers: Layers attached to this function
    :return samcli.commands.local.lib.provider.Function: Function configuration
    """
    codeuri = SamFunctionProvider._extract_sam_function_codeuri(name, resource_properties, "CodeUri")
    LOG.debug("Found Serverless function with name='%s' and CodeUri='%s'", name, codeuri)
    # Missing properties simply become None via dict.get
    return Function(
        name=name,
        runtime=resource_properties.get("Runtime"),
        memory=resource_properties.get("MemorySize"),
        timeout=resource_properties.get("Timeout"),
        handler=resource_properties.get("Handler"),
        codeuri=codeuri,
        environment=resource_properties.get("Environment"),
        rolearn=resource_properties.get("Role"),
        layers=layers)
def status(self):
    """Set status messages: bump the repository counter and, when the
    changelog check flagged news, record it with a colored message."""
    self.count_repo += 1
    if self.check == 1:
        self.count_news += 1
        green = self.meta.color["GREEN"]
        endc = self.meta.color["ENDC"]
        self.st = "{0}News in ChangeLog.txt{1}".format(green, endc)
    elif self.check == 0:
        self.st = "No changes in ChangeLog.txt"
def set_button_visible(self, visible):
    """Sets the clear button as ``visible`` and adjusts the right text
    margin so text does not run under the button.

    :param visible: Visible state (True = visible, False = hidden).
    """
    self.button.setVisible(visible)
    left, top, _, bottom = self.getTextMargins()
    right = self._margin + self._spacing if visible else 0
    self.setTextMargins(left, top, right, bottom)
def native(self):
    """The native Python datatype representation of this value.

    :return:
        A unicode string, or None when there are no contents
    """
    if self.contents is None:
        return None
    cached = self._native
    if cached is None:
        # Map the integer form of the contents and memoize the result.
        cached = self._map[self.__int__()]
        self._native = cached
    return cached
def get_wifiinfo(self, callb=None):
    """Convenience method to request the wifi info from the device.

    This will request the information from the device and request that
    callb be executed when a response is received.  There is no default
    callback.

    :param callb: Callable to be used when the response is received.
    :type callb: callable
    :returns: None
    :rtype: None
    """
    self.req_with_resp(GetWifiInfo, StateWifiInfo, callb=callb)
    return None
def delete(node_name):
    """Delete a specific node.

    :param node_name: name of the node to remove
    :return: dict with 'status' and 'message' keys
    """
    node = nago.core.get_node(node_name)
    if not node:
        return {'status': 'error', 'message': "node not found."}
    node.delete()
    return {'status': 'success', 'message': 'node deleted.'}
def read_l2tp(self, length):
    """Read Layer Two Tunnelling Protocol.

    Structure of L2TP header [RFC 2661]:

         0                   1                   2                   3
         0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
        |T|L|x|x|S|x|O|P|x|x|x|x|  Ver  |          Length (opt)         |
        |           Tunnel ID           |           Session ID          |
        |             Ns (opt)          |             Nr (opt)          |
        |      Offset Size (opt)        |    Offset pad... (opt)

        Octets  Bits  Name               Description
        0       0     l2tp.flags         Flags and Version Info
        0       0     l2tp.flags.type    Type (0/1)
        0       1     l2tp.flags.len     Length
        0       2     -                  Reserved (must be zero)
        0       4     l2tp.flags.seq     Sequence
        0       5     -                  Reserved (must be zero)
        0       6     l2tp.flags.offset  Offset
        0       7     l2tp.flags.prio    Priority
        1       8     -                  Reserved (must be zero)
        1       12    l2tp.ver           Version (2)
        2       16    l2tp.length        Length (optional by len)
        4       32    l2tp.tunnelid      Tunnel ID
        6       48    l2tp.sessionid     Session ID
        8       64    l2tp.ns            Sequence Number (optional by seq)
        10      80    l2tp.nr            Next Sequence Number (optional by seq)
        12      96    l2tp.offset        Offset Size (optional by offset)

    :param length: remaining packet length; defaults to len(self)
    :return: parsed header dict, extended with the next protocol layer
    """
    if length is None:
        length = len(self)
    _flag = self._read_binary(1)               # first byte as a bit string
    _vers = self._read_fileng(1).hex()[1]      # low nibble = version number
    _hlen = self._read_unpack(2) if int(_flag[1]) else None   # L bit: Length field present
    _tnnl = self._read_unpack(2)
    _sssn = self._read_unpack(2)
    _nseq = self._read_unpack(2) if int(_flag[4]) else None   # S bit: Ns present
    _nrec = self._read_unpack(2) if int(_flag[4]) else None   # S bit: Nr present
    _size = self._read_unpack(2) if int(_flag[6]) else 0      # O bit: Offset Size present
    l2tp = dict(
        flags=dict(
            type='Control' if int(_flag[0]) else 'Data',
            len=True if int(_flag[1]) else False,
            seq=True if int(_flag[4]) else False,
            offset=True if int(_flag[6]) else False,
            prio=True if int(_flag[7]) else False,
        ),
        ver=int(_vers, base=16),
        length=_hlen,
        tunnelid=_tnnl,
        sessionid=_sssn,
        ns=_nseq,
        nr=_nrec,
        offset=8 * _size or None,
    )
    # Header length: take the explicit Length field when present, otherwise
    # compute 6 fixed bytes plus 2 bytes per enabled optional field
    # (Ns and Nr together count double).
    hdr_len = _hlen or (6 + 2 * (int(_flag[1]) + 2 * int(_flag[4]) + int(_flag[6])))
    l2tp['hdr_len'] = hdr_len + _size * 8
    # if _size:
    #     l2tp['padding'] = self._read_fileng(_size * 8)
    length -= l2tp['hdr_len']
    l2tp['packet'] = self._read_packet(header=l2tp['hdr_len'], payload=length)
    return self._decode_next_layer(l2tp, length)
def scan_patch(project, patch_file, binaries, ips, urls, file_audit_list, file_audit_project_list, flag_list, ignore_list, file_ignore, ignore_directories, url_ignore, ip_ignore, apikey):
    """Scan actions for each committed file in a patch set.

    Checks binaries against a whitelist of hashes, file names against the
    audit blacklist, and text content for IP addresses, URLs and flagged
    regular expressions.  Failures are logged, written to per-project
    report files, and recorded in the module-level `failure` flag.

    :param project: project name; used for path splitting and report names
    :param patch_file: path of the committed file to scan
    :param binaries: if True, check binary files against the hash whitelist
    :param ips: if True, scan text lines for IP addresses
    :param urls: if True, scan text lines for URLs
    :param file_audit_list: compiled regex of blacklisted file names
    :param file_audit_project_list: compiled regex of project exceptions
    :param flag_list: dict of {name: {'regex': ..., 'desc': ...}} content rules
    :param ignore_list: regex of lines exempt from content rules
    :param file_ignore: file extensions exempt from content scanning
    :param ignore_directories: directory fragments exempt from all scanning
    :param url_ignore: regex of URLs to skip
    :param ip_ignore: regex of IP addresses to skip
    :param apikey: Virus Total API key used by the scan_* helpers
    """
    global failure
    # Path of the file relative to the project root.
    split_path = patch_file.split(project + '/', 1)[-1]
    if not any(x in split_path for x in ignore_directories):
        if is_binary(patch_file) and binaries:
            hashlist = get_lists.GetLists()
            binary_hash = hashlist.binary_hash(project, split_path)
            with open(patch_file, 'rb') as afile:
                hasher = hashlib.sha256()
                buf = afile.read()
                hasher.update(buf)
                sha256hash = hasher.hexdigest()
            if sha256hash in binary_hash:
                log.info('Found matching file hash for: %s', patch_file)
            else:
                # Unknown binary: report it and submit it for scanning.
                log.info('sha256hash: %s', sha256hash)
                log.error('Non Whitelisted Binary file: %s', patch_file)
                scan_binary(patch_file, project, sha256hash, apikey)
                failure = True
                with open(reports_dir + "binaries-" + project + ".log", "a") as gate_report:
                    gate_report.write('Non Whitelisted Binary file: {0}\n'.format(patch_file))
        else:
            # Check file names/extensions against the blacklist, honoring
            # per-project exceptions.
            if file_audit_list.search(patch_file) and not file_audit_project_list.search(patch_file):
                match = file_audit_list.search(patch_file)
                log.error('Blacklisted file: %s', patch_file)
                log.error('Matched String: %s', match.group())
                failure = True
                with open(reports_dir + "file-names_" + project + ".log", "a") as gate_report:
                    gate_report.write('Blacklisted file: {0}\n'.format(patch_file))
                    gate_report.write('Matched String: {0}'.format(match.group()))
            # Open file to check for blacklisted content
            if not is_binary(patch_file):
                try:
                    fo = open(patch_file, 'r')
                    lines = fo.readlines()
                    file_exists = True
                except IOError:
                    file_exists = False
                if file_exists and not patch_file.endswith(tuple(file_ignore)):
                    for line in lines:
                        # Find IP Addresses and send for report to Virus Total
                        if ips:
                            ipaddr = re.findall(r'(?:\d{1,3}\.)+(?:\d{1,3})', line)
                            if ipaddr:
                                ipaddr = ipaddr[0]
                                if re.search(ip_ignore, ipaddr):
                                    log.info('%s is in IP ignore list.', ipaddr)
                                else:
                                    try:
                                        # ip_address() raises for non-IP
                                        # dotted strings (e.g. versions)
                                        ipaddress.ip_address(ipaddr).is_global
                                        scan_ipaddr(ipaddr, apikey)
                                    except:
                                        pass
                                    # Ok to pass here, as this captures the odd string which is not an IP Address
                        # Check for URLs and send for report to Virus Total
                        if urls:
                            url = re.search("(?P<url>https?://[^\s]+)", line) or re.search("(?P<url>www[^\s]+)", line)
                            if url:
                                url = url.group("url")
                                if re.search(url_ignore, url):
                                    log.info('%s is in URL ignore list.', url)
                                else:
                                    scan_url(url, apikey)
                        # Perform search within text files
                        for key, value in flag_list.items():
                            regex = value['regex']
                            desc = value['desc']
                            if re.search(regex, line) and not re.search(ignore_list, line):
                                log.error('File contains violation: %s', patch_file)
                                log.error('Flagged Content: %s', line.rstrip())
                                log.error('Rationale: %s', desc.rstrip())
                                failure = True
                                with open(reports_dir + "contents_" + project + ".log", "a") as gate_report:
                                    gate_report.write('File contains violation: {0}\n'.format(patch_file))
                                    gate_report.write('Flagged Content: {0}'.format(line))
                                    gate_report.write('Matched Regular Exp: {0}\n'.format(regex))
                                    gate_report.write('Rationale: {0}\n'.format(desc.rstrip()))
def model_typedefs(vk, model):
    """Fill the model with typedefs.

    ``model['typedefs'] = {'name': 'type', ...}``

    Args:
        vk: parsed ``vk.xml`` registry (xmltodict-style dict of dicts).
        model: output model dict; its ``'typedefs'`` key is (re)created
            in place.
    """
    model['typedefs'] = {}
    registry_types = vk['registry']['types']['type']

    # bitmasks and basetypes are plain C typedefs: "typedef <type> <name>;"
    bitmasks = [x for x in registry_types if x.get('@category') == 'bitmask']
    basetypes = [x for x in registry_types if x.get('@category') == 'basetype']
    for typedef in bitmasks + basetypes:
        if not typedef.get('type'):
            continue
        model['typedefs'][typedef['name']] = typedef['type']

    # handles: per the Vulkan spec, dispatchable handles (VK_DEFINE_HANDLE)
    # are pointers to opaque structs, while non-dispatchable handles are
    # 64-bit integers.
    handles = [x for x in registry_types if x.get('@category') == 'handle']
    for handle in handles:
        if 'name' not in handle or 'type' not in handle:
            continue
        n = handle['name']
        t = handle['type']
        if t == 'VK_DEFINE_HANDLE':
            model['typedefs']['struct %s_T' % n] = '*%s' % n
        elif t == 'VK_DEFINE_NON_DISPATCHABLE_HANDLE':
            # BUGFIX: this branch previously re-tested VK_DEFINE_HANDLE,
            # which clobbered dispatchable handles with uint64_t and left
            # non-dispatchable handles with no typedef at all.
            model['typedefs'][n] = 'uint64_t'

    # custom platform dependent types
    for name in ['Display', 'xcb_connection_t', 'wl_display', 'wl_surface',
                 'MirConnection', 'MirSurface', 'ANativeWindow',
                 'SECURITY_ATTRIBUTES']:
        model['typedefs'][name] = 'struct %s' % name
    model['typedefs'].update({'Window': 'uint32_t',
                              'VisualID': 'uint32_t',
                              'xcb_window_t': 'uint32_t',
                              'xcb_visualid_t': 'uint32_t'})
def newest(self):
    """Return the newest entry in the view, regardless of sort order."""
    order = self._order_by
    # Pre-sorted views can answer from one end without scanning.
    if order == 'newest':
        return self.first
    if order == 'oldest':
        return self.last

    # Otherwise scan for the entry with the greatest (date, id) pair.
    def chronology(entry):
        return (entry.date, entry.id)

    return max(self.entries, key=chronology)
def enabled(self):
    """Whether the user is allowed to interact with the widget.

    The item is enabled only if its own flag is set and its parent
    Widget (if any) is enabled as well.
    """
    if not self._enabled:
        return False
    parent = self.parent
    # Only consult the parent when it is an actual Widget; the truthiness
    # check also keeps Widget from being evaluated for orphan items.
    if parent and isinstance(parent, Widget) and parent.enabled == False:
        return False
    return True
def get_artists(self, search, start=0, max_items=100):
    """Search the music service for artists.

    Thin convenience wrapper; see ``get_music_service_information`` for
    details on the arguments.
    """
    search_type = 'artists'
    return self.get_music_service_information(search_type, search, start,
                                              max_items)
def create_skeleton(self):
    """Create the role's directory and file structure."""
    # New roles always start out tracking the "master" branch.
    utils.string_to_file(os.path.join(self.output_path, "VERSION"),
                         "master\n")
    no_mainyml = ("templates", "meta", "tests", "files")
    for folder in c.ANSIBLE_FOLDERS:
        folder_path = os.path.join(self.output_path, folder)
        utils.mkdir_p(folder_path)
        # Render the default main.yml for this folder.
        contents = default_mainyml_template.replace("%role_name",
                                                    self.role_name)
        contents = contents.replace("%values", folder)
        main_yml = os.path.join(folder_path, "main.yml")
        if folder not in no_mainyml:
            utils.string_to_file(main_yml, contents)
        if folder == "meta":
            # meta gets a generated meta/main.yml instead of the template.
            utils.create_meta_main(main_yml, self.config, self.role_name,
                                   self.options.galaxy_categories)
def create_secret(namespace, name, sources, apiserver_url=None, force=False, update=False, saltenv='base'):
    '''
    .. versionadded:: 2016.3.0

    Create k8s secrets in the defined namespace from the list of files.

    CLI Example:

    .. code-block:: bash

        salt '*' k8s.create_secret namespace_name secret_name sources
        salt '*' k8s.create_secret namespace_name secret_name sources http://kube-master.cluster.local

    ``sources`` is either a dictionary of ``{name: path, name1: path}`` pairs
    or an array of strings defining paths, for example::

        ['/full/path/filename', "file:///full/path/filename",
         "salt://secret/storage/file.txt",
         "http://user:password@securesite.com/secret-file.json"]

    or::

        {"nameit": '/full/path/filename', name2: "salt://secret/storage/file.txt"}

    Optional parameters:

    update=[false]
        If false and the secret is already present on the cluster, a warning
        is returned and the secret is left untouched. If "true" and the
        secret is present but its data differs, the secret is updated.

    force=[true]
        If False, the secret is not created when one of the files is not a
        valid kubernetes secret (e.g. capital letters or '_' in the name).
        If True, bad files are skipped and the secret is created anyway.

    saltenv=['base']
        When a 'salt://' path is used, changes the visibility of files.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}

    if not sources:
        return {'name': name, 'result': False, 'comment': 'No source available', 'changes': {}}

    apiserver_url = _guess_apiserver(apiserver_url)

    # we need namespace to create secret in it
    if not _get_namespaces(apiserver_url, namespace):
        if force:
            _create_namespace(namespace, apiserver_url)
        else:
            return {'name': name, 'result': False, 'comment': "Namespace doesn't exists", 'changes': {}}

    secret = _get_secrets(namespace, name, apiserver_url)
    if secret and not update:
        # Secret exists and caller did not ask for an update: no-op failure.
        log.info("Secret %s is already present on %s", name, namespace)
        return {'name': name, 'result': False, 'comment': 'Secret {0} is already present'.format(name), 'changes': {}}

    # Collected secret payload: {entry_name: base64-encoded content}
    data = {}

    for source in sources:
        log.debug("source is: %s", source)
        if isinstance(source, dict):
            # format is array of dictionaries:
            # [{public_auth: salt://public_key}, {test: "/tmp/test"}]
            log.trace("source is dictionary: %s", source)
            for k, v in six.iteritems(source):
                sname, encoded = _source_encode(v, saltenv)
                if sname == encoded == "":
                    # _source_encode signals a bad/missing file with two
                    # empty strings.
                    ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(v)
                    if force:
                        continue
                    else:
                        return ret
                data[k] = encoded
        elif isinstance(source, six.string_types):
            # expected format is array of filenames
            sname, encoded = _source_encode(source, saltenv)
            if sname == encoded == "":
                if force:
                    ret['comment'] += "Source file {0} is missing or name is incorrect\n".format(source)
                    continue
                else:
                    return ret
            data[sname] = encoded

    log.trace('secret data is: %s', data)

    if secret and update:
        if not data:
            ret["comment"] += "Could not find source files or your sources are empty"
            ret["result"] = False
        elif secret.get("data") and data != secret.get("data"):
            # NOTE(review): 'res' is never inspected; the API call result is
            # not reflected in 'ret' -- confirm this is intended.
            res = _update_secret(namespace, name, data, apiserver_url)
            ret['comment'] = 'Updated secret'
            ret['changes'] = 'Updated secret'
        else:
            log.debug("Secret has not been changed on cluster, skipping it")
            ret['comment'] = 'Has not been changed on cluster, skipping it'
    else:
        # Fresh secret: create it (result of the API call is not checked).
        res = _create_secret(namespace, name, data, apiserver_url)
    return ret
def retrieve_state_ids(self, activity, agent, registration=None, since=None):
    """Retrieve state id's from the LRS with the provided parameters.

    :param activity: Activity object of desired states
    :type activity: :class:`tincan.activity.Activity`
    :param agent: Agent object of desired states
    :type agent: :class:`tincan.agent.Agent`
    :param registration: Registration UUID of desired states
    :type registration: str | unicode
    :param since: Retrieve state id's since this time
    :type since: str | unicode
    :return: LRS Response object with the retrieved state id's as content
    :rtype: :class:`tincan.lrs_response.LRSResponse`
    """
    # Coerce plain values into the tincan model objects the API expects.
    if not isinstance(activity, Activity):
        activity = Activity(activity)
    if not isinstance(agent, Agent):
        agent = Agent(agent)

    params = {
        "activityId": activity.id,
        "agent": agent.to_json(self.version),
    }
    if registration is not None:
        params["registration"] = registration
    if since is not None:
        params["since"] = since

    request = HTTPRequest(method="GET", resource="activities/state")
    request.query_params = params

    lrs_response = self._send_request(request)
    if lrs_response.success:
        # Body is a JSON array of state ids; expose it decoded.
        lrs_response.content = json.loads(lrs_response.data)
    return lrs_response
def pong(self, event='PONG', data='', **kw):  # pragma: no cover
    """PONG/PING keep-alive handler.

    Every server answer pushes back both the reconnect deadline and the
    next scheduled PING, so the link is only torn down after ``timeout``
    seconds of silence.
    """
    self.bot.log.debug('%s ping-pong (%s)', event, data)
    loop = self.bot.loop
    # Server is alive: reset the reconnect timer.
    if self.reconn_handle is not None:
        self.reconn_handle.cancel()
    self.reconn_handle = loop.call_later(self.timeout, self.reconnect)
    # Schedule the next PING max_lag seconds before the reconnect fires.
    if self.ping_handle is not None:
        self.ping_handle.cancel()
    next_ping = self.timeout - self.max_lag
    self.ping_handle = loop.call_later(
        next_ping, self.bot.send, 'PING :%s' % int(loop.time()))
def check_ratebase(self, rate):
    """Validate that a rate entry is quoted against the feed's base currency.

    Raises RuntimeError when a pair-style name ("EUR/USD") carries a base
    other than ``self.base``; logs a warning for base-less entries that are
    not 'X'-prefixed commodity codes.
    """
    base_part, slash, _quote = rate['name'].partition('/')
    if slash:
        # Pair-style name: the part before '/' must match our base.
        if base_part != self.base:
            raise RuntimeError("%s: %s has different base rate:\n%s"
                               % (self.name, base_part, rate))
    elif rate['name'] == self.base:
        pass
    elif not rate['symbol'].startswith('X'):
        # Codes beginning with 'X' are treated specially, e.g.
        # Gold (XAU), Copper (XCP), Palladium (XPD), Platinum (XPT),
        # Silver (XAG). They still appear to be based on USD but are
        # reported with no base; anything else without a base is suspect.
        self.log(logging.WARNING, "%s: currency found with no base:\n%s",
                 self.name, rate)
def _slice_vcf_chr21(vcf_file, out_dir):
    """Slice chr21 of qsignature SNPs to reduce computation time."""
    tmp_file = os.path.join(out_dir, "chr21_qsignature.vcf")
    # Reuse a previously sliced file when present.
    if utils.file_exists(tmp_file):
        return tmp_file
    # A plain grep is enough; no need to parse the VCF.
    cmd = ("grep chr21 {vcf_file} > {tmp_file}").format(**locals())
    subprocess.check_output(cmd, shell=True)
    return tmp_file
def _fetch_analysis_for_local_id(analysis, ans_cond):
    """Return the derivative analysis whose local id matches ``ans_cond``.

    :analysis: the analysis full object which we want to obtain the
        rules for.
    :ans_cond: the local id with the target derivative reflex rule id.
    :returns: the matching derivative analysis object, or None.
    """
    # All derivatives of a reflex chain point back to the same first
    # reflexed analysis, so query on that UID.
    first_reflexed = analysis.getOriginalReflexedAnalysis()
    catalog = getToolByName(analysis, CATALOG_ANALYSIS_LISTING)
    brains = catalog(getOriginalReflexedAnalysisUID=first_reflexed.UID())
    # Return the first derivative whose local id matches 'ans_cond'.
    for brain in brains:
        candidate = brain.getObject()
        if candidate.getReflexRuleLocalID() == ans_cond:
            return candidate
    return None
def create_namespaced_local_subject_access_review(self, namespace, body, **kwargs):
    """create a LocalSubjectAccessReview

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async_req=True``:

    >>> thread = api.create_namespaced_local_subject_access_review(namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param V1LocalSubjectAccessReview body: (required)
    :param str dry_run: When present, indicates that modifications should
        not be persisted. Valid value: "All".
    :param str field_manager: name associated with the actor making these
        changes; less than 128 printable characters.
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1LocalSubjectAccessReview, or the request thread when called
        asynchronously.
    """
    # This wrapper always wants just the deserialized body, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    delegate = self.create_namespaced_local_subject_access_review_with_http_info
    # Synchronous calls return the data directly; async calls return the
    # request thread -- either way it is exactly the delegate's result.
    return delegate(namespace, body, **kwargs)
def type(self):
    """The type of the functional, resolved from the alias table when
    possible, otherwise from the LibxcFunc family."""
    aliases = self.defined_aliases
    # Combined exchange-correlation name registered as an alias?
    if self.xc in aliases:
        return aliases[self.xc].type
    # Separate exchange/correlation pair registered as an alias?
    pair = (self.x, self.c)
    if pair in aliases:
        return aliases[pair].type
    # If self is not in defined_aliases, use LibxcFunc family
    if self.xc is not None:
        return self.xc.family
    return "+".join([self.x.family, self.c.family])
def any(self, values, axis=0):
    """Compute whether any item evaluates to true in each group.

    Parameters
    ----------
    values : array_like, [keys, ...]
        values to take boolean predicate over per group
    axis : int, optional
        alternative reduction axis for values

    Returns
    -------
    unique : ndarray, [groups]
        unique keys
    reduced : ndarray, [groups, ...], bool
        value array, reduced over groups
    """
    values = np.asarray(values)
    # BUGFIX: ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
    # is the canonical spelling of the boolean dtype.
    if values.dtype != bool:
        values = values != 0
    # A group satisfies "any" when its per-group truth count is positive.
    return self.unique, self.reduce(values, axis=axis) > 0
def show(self, idx):
    """Print the instruction: mnemonic, a space, then its rendered
    operands -- with no trailing newline."""
    text = self.get_name() + " " + self.get_output(idx)
    print(text, end=' ')
def deterministic_generate_k(generator_order, secret_exponent, val, hash_f=hashlib.sha256):
    """Generate the deterministic ECDSA nonce ``k`` per RFC 6979.

    :param generator_order: result from :method:`pycoin.ecdsa.Generator.Generator.order`,
        necessary to ensure the k value is within bound
    :param secret_exponent: an integer secret_exponent to generate the k value for
    :param val: the value to be signed, also used as an entropy source for the k value
    :returns: an integer k such that ``1 <= k < generator_order``, complying with
        <https://tools.ietf.org/html/rfc6979>
    """
    n = generator_order
    bln = bit_length(n)
    # Byte length of the curve order; all fixed-width encodings use it.
    order_size = (bln + 7) // 8
    hash_size = hash_f().digest_size
    # RFC 6979 steps b/c: V = 0x01..01 and K = 0x00..00, one hash length each.
    v = b'\x01' * hash_size
    k = b'\x00' * hash_size
    priv = intstream.to_bytes(secret_exponent, length=order_size)
    # bits2int/bits2octets: keep only the leftmost bln bits of the message
    # value and reduce once if it exceeds the order.
    shift = 8 * hash_size - bln
    if shift > 0:
        val >>= shift
    if val > n:
        val -= n
    h1 = intstream.to_bytes(val, length=order_size)
    # RFC 6979 steps d-g: two HMAC re-keying rounds mixing in the private
    # key and the message digest (separated by 0x00 then 0x01).
    k = hmac.new(k, v + b'\x00' + priv + h1, hash_f).digest()
    v = hmac.new(k, v, hash_f).digest()
    k = hmac.new(k, v + b'\x01' + priv + h1, hash_f).digest()
    v = hmac.new(k, v, hash_f).digest()
    # Step h: squeeze candidate nonces until one falls in [1, n-1].
    while 1:
        t = bytearray()
        while len(t) < order_size:
            v = hmac.new(k, v, hash_f).digest()
            t.extend(v)
        k1 = intstream.from_bytes(bytes(t))
        # bits2int on the candidate: drop the excess low-order bits.
        k1 >>= (len(t) * 8 - bln)
        if k1 >= 1 and k1 < n:
            return k1
        # Candidate rejected: re-key and generate another.
        k = hmac.new(k, v + b'\x00', hash_f).digest()
        v = hmac.new(k, v, hash_f).digest()
def reverse_subarray(arr, until):
    """Return a copy of ``arr`` with its first ``until`` elements reversed.

    >>> reverse_subarray([1, 2, 3, 4, 5, 6], 4)
    [4, 3, 2, 1, 5, 6]
    >>> reverse_subarray([4, 5, 6, 7], 2)
    [5, 4, 6, 7]
    >>> reverse_subarray([9, 8, 7, 6, 5], 3)
    [7, 8, 9, 6, 5]

    Args:
        arr: List to be modified.
        until: Position up to which the list should be reversed.

    Returns:
        A new list whose leading ``until`` elements are in reverse order
        and whose remaining elements are unchanged.
    """
    head = list(reversed(arr[:until]))
    tail = arr[until:]
    return head + tail
def unwrap_stream(stream_name):
    """Temporarily unwrap a given stream (stdin, stdout, or stderr) to
    undo the effects of wrap_stdio_in_codecs()."""
    current = None
    try:
        current = getattr(sys, stream_name)
        # Swap the codec wrapper for the raw stream it remembers, if any.
        if hasattr(current, '_original_stream'):
            setattr(sys, stream_name, current._original_stream)
        yield
    finally:
        # Re-install whatever was there before, wrapper included.
        if current:
            setattr(sys, stream_name, current)
def puppeteer(ctx, port, auto_restart, args):
    """Run puppeteer fetcher if puppeteer is installed."""
    import subprocess
    g = ctx.obj
    quit_flags = []
    fetcher_js = os.path.join(os.path.dirname(pyspider.__file__),
                              'fetcher/puppeteer_fetcher.js')
    cmd = ['node', fetcher_js, str(port)]
    try:
        proc = subprocess.Popen(cmd)
    except OSError:
        # node (or the fetcher) is unavailable; degrade gracefully.
        logging.warning('puppeteer not found, continue running without it.')
        return None

    def quit(*args, **kwargs):
        # Flag shutdown (stops the restart loop) and reap the child.
        quit_flags.append(1)
        proc.kill()
        proc.wait()
        logging.info('puppeteer exited.')

    if not g.get('puppeteer_proxy'):
        g['puppeteer_proxy'] = '127.0.0.1:%s' % port

    puppeteer = utils.ObjectDict(port=port, quit=quit)
    g.instances.append(puppeteer)
    if g.get('testing_mode'):
        return puppeteer

    # Supervise: restart the fetcher whenever it dies, unless quitting.
    while True:
        proc.wait()
        if quit_flags or not auto_restart:
            break
        proc = subprocess.Popen(cmd)
def _parse_numbers(text):
    '''Convert a string to a number, allowing for a K|M|G|T postfix, e.g. "32.8K".

    Returns a decimal number if the string is a real number (optionally
    carrying an SI magnitude suffix), or the string unchanged otherwise.
    '''
    if text.isdigit():
        return decimal.Decimal(text)
    # SI multipliers. BUGFIX: these were previously spelled '10E3', '10E6',
    # ... which Decimal parses as 10 * 10**3 etc. -- ten times too large
    # (kilo is 1E3, not 10E3).
    postPrefixes = {'K': '1E3', 'M': '1E6', 'G': '1E9', 'T': '1E12',
                    'P': '1E15', 'E': '1E18', 'Z': '1E21', 'Y': '1E24'}
    try:
        # Guard against the empty string before peeking at text[-1].
        if text and text[-1] in postPrefixes:
            v = decimal.Decimal(text[:-1])
            return v * decimal.Decimal(postPrefixes[text[-1]])
        return decimal.Decimal(text)
    except (ValueError, ArithmeticError):
        # BUGFIX: Decimal raises InvalidOperation (an ArithmeticError
        # subclass), not ValueError, on malformed input; catching only
        # ValueError made non-numeric strings crash instead of being
        # returned unchanged as documented.
        return text
def set_buffer_options(self, options, bufnr=None):
    """Set buffer-local options for a buffer, defaulting to current.

    Args:
        options (dict):
            Options to set, keyed by Vim option name. For Boolean options,
            use a :class:`bool` value as expected, e.g.
            ``{'buflisted': False}`` for ``setlocal nobuflisted``.
        bufnr (Optional[int]):
            A Vim buffer number, as you might get from VimL ``bufnr('%')``
            or Python ``vim.current.buffer.number``. If ``None``, options
            are set on the current buffer.
    """
    if bufnr:
        target = self._vim.buffers[bufnr]
    else:
        target = self._vim.current.buffer
    # Special case handling for filetype, see doc on ``set_filetype``
    filetype = options.pop('filetype', None)
    if filetype:
        self.set_filetype(filetype)
    for name, value in options.items():
        target.options[name] = value
def _getH2singleTrait(self, K, verbose=None):
    """Internal function for parameter initialization.

    Estimate variance components and a fixed effect for each of the P
    phenotypes independently, using a single-trait linear mixed model with
    an intercept and 2 random effects (one is noise).

    Args:
        K: covariance matrix of the non-noise random effect term
        verbose: verbosity flag, resolved through ``dlimix.getVerbose``

    Returns:
        dict with per-trait arrays: 'varg' (non-noise variances), 'varn'
        (noise variances) and 'fixed' (effect of the all-ones covariate,
        i.e. an intercept).
    """
    verbose = dlimix.getVerbose(verbose)
    # Fit single trait model
    varg = sp.zeros(self.P)
    varn = sp.zeros(self.P)
    fixed = sp.zeros((1, self.P))
    for p in range(self.P):
        y = self.Y[:, p:p + 1]
        # Check for missing (NaN) phenotype values in this trait.
        I = sp.isnan(y[:, 0])
        if I.sum() > 0:
            # Drop missing samples and the matching rows/columns of K.
            y = y[~I, :]
            _K = K[~I, :][:, ~I]
        else:
            _K = copy.copy(K)
        lmm = dlimix.CLMM()
        lmm.setK(_K)
        # The single all-ones "SNP" column acts as the intercept term.
        lmm.setSNPs(sp.ones((y.shape[0], 1)))
        lmm.setPheno(y)
        lmm.setCovs(sp.zeros((y.shape[0], 1)))
        lmm.setVarcompApprox0(-20, 20, 1000)
        lmm.process()
        # Back-transform from log scale: delta is the noise/signal ratio.
        delta = sp.exp(lmm.getLdelta0()[0, 0])
        Vtot = sp.exp(lmm.getLSigma()[0, 0])
        varg[p] = Vtot
        varn[p] = delta * Vtot
        fixed[:, p] = lmm.getBetaSNP()
        if verbose:
            print(p)
    sth = {}
    sth['varg'] = varg
    sth['varn'] = varn
    sth['fixed'] = fixed
    return sth
def filter_out_empty(tuples: list) -> list:
    """Drop empty tuples (and any other falsy entries) from a list.

    Args:
        tuples (list): A list of tuples of any length.

    Returns:
        list: A version of the input list minus any empty tuple entries.

    Example:
        >>> filter_out_empty([(), (), ('',), ('a', 'b'), ('a', 'b', 'c'), 'd'])
        [('',), ('a', 'b'), ('a', 'b', 'c'), 'd']
        >>> filter_out_empty([(), (), ('',), 'python', 'program'])
        [('',), 'python', 'program']
        >>> filter_out_empty([(), (), ('',), 'java'])
        [('',), 'java']
    """
    # filter(None, ...) keeps exactly the truthy items, which for tuples
    # means the non-empty ones.
    return list(filter(None, tuples))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.