signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _CheckLogFileSize(cursor):
    """Warn when the MySQL redo log is too small for large blob insertions."""
    # Do not fail, because users might not be able to change this for their
    # database. Instead, warn the user about the impacts.
    actual_size = int(_ReadVariable("innodb_log_file_size", cursor))
    min_size = 10 * mysql_blobs.BLOB_CHUNK_SIZE
    if actual_size >= min_size:
        return
    # See MySQL error 1118: The size of BLOB/TEXT data inserted in one
    # transaction is greater than 10% of redo log size. Increase the redo log
    # size using innodb_log_file_size.
    largest_blob_mib = (actual_size / 10) / 2 ** 20
    logging.warning(
        "MySQL innodb_log_file_size of %d is required, got %d. "
        "Storing Blobs bigger than %.4f MiB will fail.",
        min_size, actual_size, largest_blob_mib)
def _WsdlHasMethod(self, method_name):
    """Determine if the wsdl contains a method.

    Args:
      method_name: The name of the method to search.

    Returns:
      True if the method is in the WSDL, otherwise False.
    """
    wsdl_methods = self.suds_client.wsdl.services[0].ports[0].methods
    return method_name in wsdl_methods
def list_all_option_values(cls, **kwargs):
    """List OptionValues.

    Returns a page of OptionValues. Synchronous by default; pass
    ``async=True`` (as a kwarg) to get the request thread instead.

    :param async bool: run the request asynchronously
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[OptionValue], or the request thread when asynchronous
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: hand the caller the request thread directly.
        return cls._list_all_option_values_with_http_info(**kwargs)
    data = cls._list_all_option_values_with_http_info(**kwargs)
    return data
def reindex_axis(self, new_index, axis, method=None, limit=None, fill_value=None, copy=True):
    """Conform block manager to new index."""
    target = ensure_index(new_index)
    # The axis object computes both the final index and the take-indexer.
    target, indexer = self.axes[axis].reindex(target, method=method, limit=limit)
    return self.reindex_indexer(target, indexer, axis=axis,
                                fill_value=fill_value, copy=copy)
def reset_uniforms(self):
    """Reset the model/normal matrix uniforms to the global coordinate system."""
    for uniform_name, matrix in (('model_matrix', self.model_matrix_global),
                                 ('normal_matrix', self.normal_matrix_global)):
        self.uniforms[uniform_name] = matrix.view()
def subset_otf_from_ufo(self, otf_path, ufo):
    """Subset a font using export flags set by glyphsLib.

    There are two more settings that can change export behavior:
    "Export Glyphs" and "Remove Glyphs", which are currently not supported
    for complexity reasons. See
    https://github.com/googlei18n/glyphsLib/issues/295.
    """
    from fontTools import subset

    # ufo2ft always inserts a ".notdef" glyph as the first glyph
    source_order = makeOfficialGlyphOrder(ufo)
    if ".notdef" not in source_order:
        source_order.insert(0, ".notdef")
    binary_order = TTFont(otf_path).getGlyphOrder()
    assert binary_order[0] == ".notdef"
    assert len(source_order) == len(binary_order)

    # The "keep glyphs" list may live under either the new or the old lib key.
    keep_glyphs = None
    for lib_key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY):
        lib_value = ufo.lib.get(lib_key)
        if lib_value is not None:
            keep_glyphs = set(lib_value)
            break

    include = []
    for source_name, binary_name in zip(source_order, binary_order):
        if keep_glyphs and source_name not in keep_glyphs:
            continue
        if source_name in ufo and not ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True):
            continue
        include.append(binary_name)

    # copied from nototools.subset
    options = subset.Options()
    options.name_IDs = ["*"]
    options.name_legacy = True
    options.name_languages = ["*"]
    options.layout_features = ["*"]
    options.notdef_outline = True
    options.recalc_bounds = True
    options.recalc_timestamp = True
    options.canonical_order = True
    options.glyph_names = True

    font = subset.load_font(otf_path, options, lazy=False)
    subsetter = subset.Subsetter(options=options)
    subsetter.populate(glyphs=include)
    subsetter.subset(font)
    subset.save_font(font, otf_path, options)
def sph2cart(lon, lat):
    """Convert longitude and latitude (or sequences thereof) given in
    *radians* to cartesian coordinates ``x``, ``y``, ``z`` on the unit
    sphere, where x=0, y=0, z=0 is the center of the globe.

    Parameters
    ----------
    lon : array-like
        Longitude in radians
    lat : array-like
        Latitude in radians

    Returns
    -------
    x, y, z : arrays of cartesian coordinates
    """
    cos_lat = np.cos(lat)
    return cos_lat * np.cos(lon), cos_lat * np.sin(lon), np.sin(lat)
def setWorkingCollisionBoundsInfo(self, unQuadsCount):
    """Sets the Collision Bounds in the working copy."""
    fn = self.function_table.setWorkingCollisionBoundsInfo
    # NOTE(review): a single HmdQuad_t is allocated even though a quad count
    # is passed — presumably the native call expects an array of
    # unQuadsCount quads; confirm against the generated OpenVR bindings.
    quads_buffer = HmdQuad_t()
    fn(byref(quads_buffer), unQuadsCount)
    return quads_buffer
def reshape(tt_array, shape, eps=1e-14, rl=1, rr=1):
    '''Reshape of the TT-vector.

    [TT1] = TT_RESHAPE(TT, SZ) reshapes a TT-vector or TT-matrix into another
    with mode sizes SZ, accuracy 1e-14.

    [TT1] = TT_RESHAPE(TT, SZ, EPS) reshapes a TT-vector/matrix into another
    with mode sizes SZ and accuracy EPS.

    [TT1] = TT_RESHAPE(TT, SZ, EPS, RL) additionally uses left tail rank RL.

    [TT1] = TT_RESHAPE(TT, SZ, EPS, RL, RR) additionally uses tail ranks RL*RR.

    If the input is a TT-matrix, SZ must contain the sizes for both modes,
    i.e. be a matrix of size d2-by-2. If the input is a TT-vector, SZ may be
    either a column or a row vector.
    '''
    # Work on deep copies so the caller's tensor and shape are never mutated.
    tt1 = _cp.deepcopy(tt_array)
    sz = _cp.deepcopy(shape)
    ismatrix = False
    if isinstance(tt1, _matrix.matrix):
        d1 = tt1.tt.d
        d2 = sz.shape[0]
        ismatrix = True
        # The size should be [n, m] in R^{d x 2}
        restn2_n = sz[:, 0]
        restn2_m = sz[:, 1]
        sz_n = _cp.copy(sz[:, 0])
        sz_m = _cp.copy(sz[:, 1])
        n1_n = tt1.n
        n1_m = tt1.m
        # We will split/convolve using the vector form anyway.
        sz = _np.prod(sz, axis=1)
        tt1 = tt1.tt
    else:
        d1 = tt1.d
        d2 = len(sz)
    # Recompute sz to include r0, rd, and absorb the tail ranks into the
    # boundary mode sizes of tt1.
    sz[0] = sz[0] * rl
    sz[d2 - 1] = sz[d2 - 1] * rr
    tt1.n[0] = tt1.n[0] * tt1.r[0]
    tt1.n[d1 - 1] = tt1.n[d1 - 1] * tt1.r[d1]
    if ismatrix:  # in matrix: 1st tail rank goes to the n-mode, last to the m-mode
        restn2_n[0] = restn2_n[0] * rl
        restn2_m[d2 - 1] = restn2_m[d2 - 1] * rr
        n1_n[0] = n1_n[0] * tt1.r[0]
        n1_m[d1 - 1] = n1_m[d1 - 1] * tt1.r[d1]
    tt1.r[0] = 1
    tt1.r[d1] = 1
    n1 = tt1.n
    # Total number of elements must be preserved by the reshape.
    assert _np.prod(n1) == _np.prod(sz), 'Reshape: incorrect sizes'

    # Decide whether cores must be split (QR orthogonalization needed) or
    # whether the target modes can be assembled purely by merging.
    needQRs = False
    if d2 > d1:
        needQRs = True
    if d2 <= d1:
        i2 = 0
        n2 = _cp.deepcopy(sz)
        for i1 in range(d1):
            if n2[i2] == 1:
                i2 = i2 + 1
                if i2 > d2:
                    break
            if n2[i2] % n1[i1] == 0:
                n2[i2] = n2[i2] // n1[i1]
            else:
                needQRs = True
                break

    r1 = tt1.r
    tt1 = tt1.to_list(tt1)

    if needQRs:  # We have to split some cores -> perform right-to-left QRs
        for i in range(d1 - 1, 0, -1):
            cr = tt1[i]
            cr = _np.reshape(cr, (r1[i], n1[i] * r1[i + 1]), order='F')
            [cr, rv] = _np.linalg.qr(cr.T)
            # Size n*r2, r1new - r1new, r1
            cr0 = tt1[i - 1]
            cr0 = _np.reshape(cr0, (r1[i - 1] * n1[i - 1], r1[i]), order='F')
            # Carry the R factor into the previous core.
            cr0 = _np.dot(cr0, rv.T)
            # r0*n0, r1new
            r1[i] = cr.shape[1]
            cr0 = _np.reshape(cr0, (r1[i - 1], n1[i - 1], r1[i]), order='F')
            cr = _np.reshape(cr.T, (r1[i], n1[i], r1[i + 1]), order='F')
            tt1[i] = cr
            tt1[i - 1] = cr0

    r2 = _np.ones(d2 + 1, dtype=_np.int32)
    i1 = 0  # Working index in tt1
    i2 = 0  # Working index in tt2
    core2 = _np.zeros((0))
    curcr2 = 1
    restn2 = sz
    n2 = _np.ones(d2, dtype=_np.int32)
    if ismatrix:
        n2_n = _np.ones(d2, dtype=_np.int32)
        n2_m = _np.ones(d2, dtype=_np.int32)

    while i1 < d1:
        curcr1 = tt1[i1]
        if _gcd(restn2[i2], n1[i1]) == n1[i1]:
            # The whole core1 fits to core2. Convolve it.
            if (i1 < d1 - 1) and (needQRs):  # QR to the next core - for safety
                curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F')
                [curcr1, rv] = _np.linalg.qr(curcr1)
                curcr12 = tt1[i1 + 1]
                curcr12 = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1] * r1[i1 + 2]), order='F')
                curcr12 = _np.dot(rv, curcr12)
                r1[i1 + 1] = curcr12.shape[0]
                tt1[i1 + 1] = _np.reshape(curcr12, (r1[i1 + 1], n1[i1 + 1], r1[i1 + 2]), order='F')
            # Actually merge is here
            curcr1 = _np.reshape(curcr1, (r1[i1], n1[i1] * r1[i1 + 1]), order='F')
            curcr2 = _np.dot(curcr2, curcr1)
            # size r21*nold, dn*r22
            if ismatrix:  # Permute if we are working with tt_matrix
                curcr2 = _np.reshape(curcr2, (r2[i2], n2_n[i2], n2_m[i2], n1_n[i1], n1_m[i1], r1[i1 + 1]), order='F')
                curcr2 = _np.transpose(curcr2, [0, 1, 3, 2, 4, 5])
                # Update the "matrix" sizes
                n2_n[i2] = n2_n[i2] * n1_n[i1]
                n2_m[i2] = n2_m[i2] * n1_m[i1]
                restn2_n[i2] = restn2_n[i2] // n1_n[i1]
                restn2_m[i2] = restn2_m[i2] // n1_m[i1]
            r2[i2 + 1] = r1[i1 + 1]
            # Update the sizes of tt2
            n2[i2] = n2[i2] * n1[i1]
            restn2[i2] = restn2[i2] // n1[i1]
            curcr2 = _np.reshape(curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F')
            i1 = i1 + 1
            # current core1 is over
        else:
            if (_gcd(restn2[i2], n1[i1]) != 1) or (restn2[i2] == 1):
                # There exists a nontrivial divisor, or a singleton requested.
                # Split it via a truncated SVD and convolve the left factor.
                n12 = _gcd(restn2[i2], n1[i1])
                if ismatrix:  # Permute before the truncation
                    # matrix sizes we are able to split
                    n12_n = _gcd(restn2_n[i2], n1_n[i1])
                    n12_m = _gcd(restn2_m[i2], n1_m[i1])
                    curcr1 = _np.reshape(curcr1, (r1[i1], n12_n, n1_n[i1] // n12_n, n12_m, n1_m[i1] // n12_m, r1[i1 + 1]), order='F')
                    curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5])
                    # Update the matrix sizes of tt2 and tt1
                    n2_n[i2] = n2_n[i2] * n12_n
                    n2_m[i2] = n2_m[i2] * n12_m
                    restn2_n[i2] = restn2_n[i2] // n12_n
                    restn2_m[i2] = restn2_m[i2] // n12_m
                    n1_n[i1] = n1_n[i1] // n12_n
                    n1_m[i1] = n1_m[i1] // n12_m
                curcr1 = _np.reshape(curcr1, (r1[i1] * n12, (n1[i1] // n12) * r1[i1 + 1]), order='F')
                [u, s, v] = _np.linalg.svd(curcr1, full_matrices=False)
                # Rank after eps-truncation, with the tolerance spread over modes.
                r = _my_chop2(s, eps * _np.linalg.norm(s) / (d2 - 1) ** 0.5)
                u = u[:, :r]
                v = v.T
                v = v[:, :r] * s[:r]
                u = _np.reshape(u, (r1[i1], n12 * r), order='F')
                # u is our admissible chunk, merge it to core2
                curcr2 = _np.dot(curcr2, u)
                # size r21*nold, dn*r22
                r2[i2 + 1] = r
                # Update the sizes of tt2
                n2[i2] = n2[i2] * n12
                restn2[i2] = restn2[i2] // n12
                curcr2 = _np.reshape(curcr2, (r2[i2] * n2[i2], r2[i2 + 1]), order='F')
                r1[i1] = r
                # and tt1
                n1[i1] = n1[i1] // n12
                # keep v in tt1 for next operations
                curcr1 = _np.reshape(v.T, (r1[i1], n1[i1], r1[i1 + 1]), order='F')
                tt1[i1] = curcr1
            else:
                # Bad case. We have to merge cores of tt1 until a common
                # divisor appears.
                i1new = i1 + 1
                curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1 + 1]), order='F')
                while (_gcd(restn2[i2], n1[i1]) == 1) and (i1new < d1):
                    cr1new = tt1[i1new]
                    cr1new = _np.reshape(cr1new, (r1[i1new], n1[i1new] * r1[i1new + 1]), order='F')
                    # size r1(i1)*n1(i1), n1new*r1new
                    curcr1 = _np.dot(curcr1, cr1new)
                    if ismatrix:  # Permutes and matrix size updates
                        curcr1 = _np.reshape(curcr1, (r1[i1], n1_n[i1], n1_m[i1], n1_n[i1new], n1_m[i1new], r1[i1new + 1]), order='F')
                        curcr1 = _np.transpose(curcr1, [0, 1, 3, 2, 4, 5])
                        n1_n[i1] = n1_n[i1] * n1_n[i1new]
                        n1_m[i1] = n1_m[i1] * n1_m[i1new]
                    n1[i1] = n1[i1] * n1[i1new]
                    curcr1 = _np.reshape(curcr1, (r1[i1] * n1[i1], r1[i1new + 1]), order='F')
                    i1new = i1new + 1
                # Inner cores merged => squeeze tt1 data.
                # NOTE(review): this concatenation drops the updated entry at
                # index i1 (compare MATLAB tt_reshape, which keeps n1(1:i1)
                # inclusively) — confirm against the upstream reference.
                n1 = _np.concatenate((n1[:i1], n1[i1new:]))
                r1 = _np.concatenate((r1[:i1], r1[i1new:]))
                # NOTE(review): 'i' here is stale from the earlier QR loop and
                # is unbound when needQRs is False — likely should be i1;
                # confirm against upstream before changing.
                tt1[i] = _np.reshape(curcr1, (r1[i1], n1[i1], r1[i1new]), order='F')
                tt1 = tt1[:i1] + tt1[i1new:]
                d1 = len(n1)
        if (restn2[i2] == 1) and ((i1 >= d1) or ((i1 < d1) and (n1[i1] != 1))):
            # The core of tt2 is finished.
            # The second condition prevents core2 from finishing until we
            # squeeze all tailing singletons in tt1.
            curcr2 = curcr2.flatten(order='F')
            core2 = _np.concatenate((core2, curcr2))
            i2 = i2 + 1
            # Start new core2
            curcr2 = 1

    # If we have been asked for singletons - just add them
    while (i2 < d2):
        core2 = _np.concatenate((core2, _np.ones(1)))
        r2[i2] = 1
        i2 = i2 + 1

    tt2 = ones(2, 1)
    # dummy tensor; its fields are overwritten below
    tt2.d = d2
    tt2.n = n2
    tt2.r = r2
    tt2.core = core2
    tt2.ps = _np.int32(_np.cumsum(_np.concatenate((_np.ones(1), r2[:-1] * n2 * r2[1:]))))
    # Restore the requested tail ranks by dividing them back out of the
    # boundary mode sizes.
    tt2.n[0] = tt2.n[0] // rl
    tt2.n[d2 - 1] = tt2.n[d2 - 1] // rr
    tt2.r[0] = rl
    tt2.r[d2] = rr
    if ismatrix:
        ttt = eye(1, 1)
        # dummy tt_matrix; wrap the vector result back into matrix form
        ttt.n = sz_n
        ttt.m = sz_m
        ttt.tt = tt2
        return ttt
    else:
        return tt2
def _build(self, inputs, **normalization_build_kwargs):
    """Assembles the `ConvNet2D` and connects it to the graph.

    Args:
      inputs: A 4D Tensor of shape `[batch_size, input_height, input_width,
        input_channels]`.
      **normalization_build_kwargs: kwargs passed to the normalization module
        at build time.

    Returns:
      A 4D Tensor of shape `[batch_size, output_height, output_width,
      output_channels[-1]]`.

    Raises:
      ValueError: If `is_training` is not explicitly specified when using
        batch normalization.
    """
    uses_batch_norm = self._normalization_ctor in {
        batch_norm.BatchNorm, batch_norm_v2.BatchNormV2}
    if uses_batch_norm and "is_training" not in normalization_build_kwargs:
        raise ValueError("Boolean is_training flag must be explicitly specified "
                         "when using batch normalization.")

    self._input_shape = tuple(inputs.get_shape().as_list())
    net = inputs
    last = len(self._layers) - 1
    for i, layer in enumerate(self._layers):
        net = layer(net)
        if i != last or self._normalize_final:
            if self._normalization_ctor is not None:
                # The name 'batch_norm' is used even if something else like
                # LayerNorm is being used. This is to avoid breaking old checkpoints.
                normalizer = self._normalization_ctor(
                    name="batch_norm_{}".format(i), **self._normalization_kwargs)
                net = normalizer(net, **util.remove_unsupported_kwargs(
                    normalizer, normalization_build_kwargs))
            elif normalization_build_kwargs:
                tf.logging.warning(
                    "No normalization configured, but extra kwargs "
                    "provided: {}".format(normalization_build_kwargs))
        if i != last or self._activate_final:
            net = self._activation(net)
    return net
def first_seen(self, first_seen):
    """Set Document first seen."""
    formatted = self._utils.format_datetime(
        first_seen, date_format='%Y-%m-%dT%H:%M:%SZ')
    self._group_data['firstSeen'] = formatted
def export(g, csv_fname):
    """export a graph to CSV for simpler viewing"""
    with open(csv_fname, "w") as out:
        row_count = 0
        out.write('"num","subject","predicate","object"\n')
        for subj, pred, obj in g:
            row_count += 1
            out.write('"%s","%s","%s","%s"\n' % (
                row_count,
                get_string_from_rdf(subj),
                get_string_from_rdf(pred),
                get_string_from_rdf(obj)))
    print("Finished exporting ", row_count, " tuples")
def reload_if_changed(self, force=False):
    """If the file(s) being watched by this object have changed,
    their configuration will be loaded again using `config_loader`.
    Otherwise this is a noop.

    :param force: If True ignore the `min_interval` and proceed to
                  file modified comparisons. To force a reload use
                  :func:`reload` directly.
    """
    check_now = force or self.should_check
    if check_now and self.file_modified():
        return self.reload()
def delete_relationship(self, json_data, relationship_field, related_id_field, view_kwargs):
    """Delete a relationship.

    :param dict json_data: the request params
    :param str relationship_field: the model attribute used for relationship
    :param str related_id_field: the identifier field of the related model
    :param dict view_kwargs: kwargs from the resource view
    :return: tuple (obj, updated) — the parent object and whether anything changed
    """
    self.before_delete_relationship(json_data, relationship_field, related_id_field, view_kwargs)
    obj = self.get_object(view_kwargs)
    if obj is None:
        # Parent object addressed by the URL does not exist -> 404.
        url_field = getattr(self, 'url_field', 'id')
        filter_value = view_kwargs[url_field]
        raise ObjectNotFound('{}: {} not found'.format(self.model.__name__, filter_value),
                             source={'parameter': url_field})
    if not hasattr(obj, relationship_field):
        raise RelationNotFound("{} has no attribute {}".format(
            obj.__class__.__name__, relationship_field))
    # Resolve the related model class via the SQLAlchemy relationship property.
    related_model = getattr(obj.__class__, relationship_field).property.mapper.class_
    updated = False
    if isinstance(json_data['data'], list):
        # To-many relationship: remove only the related objects whose ids
        # appear both in the payload and in the current relationship.
        obj_ids = {str(getattr(obj__, related_id_field)) for obj__ in getattr(obj, relationship_field)}
        for obj_ in json_data['data']:
            if obj_['id'] in obj_ids:
                getattr(obj, relationship_field).remove(
                    self.get_related_object(related_model, related_id_field, obj_))
                updated = True
    else:
        # To-one relationship: clear it unconditionally.
        setattr(obj, relationship_field, None)
        updated = True
    try:
        self.session.commit()
    except JsonApiException as e:
        self.session.rollback()
        raise e
    except Exception as e:
        # Wrap unexpected DB errors in the API exception type after rollback.
        self.session.rollback()
        raise JsonApiException("Delete relationship error: " + str(e))
    self.after_delete_relationship(obj, updated, json_data, relationship_field,
                                   related_id_field, view_kwargs)
    return obj, updated
def save_all(self):
    """Save all opened files.

    Iterate through self.data and call save() on any modified files.
    """
    for idx in range(self.get_stack_count()):
        document = self.data[idx].editor.document()
        if document.isModified():
            self.save(idx)
def mixin(self):
    """Add your own custom functions to the Underscore object, ensuring that
    they're correctly added to the OOP wrapper as well.
    """
    custom_methods = self.obj
    for name in custom_methods:
        setattr(underscore, name, custom_methods[name])
    self.makeStatic()
    return self._wrap(self.obj)
def _rename_with_content_disposition(self, response: HTTPResponse):
    '''Rename using the Content-Disposition header.'''
    # Only applies when a filename is already chosen and the URL is HTTP(S).
    if not self._filename:
        return
    if response.request.url_info.scheme not in ('http', 'https'):
        return
    header_value = response.fields.get('Content-Disposition')
    if not header_value:
        return
    filename = parse_content_disposition(header_value)
    if not filename:
        return
    directory = os.path.dirname(self._filename)
    safe_name = self._path_namer.safe_filename(filename)
    self._filename = os.path.join(directory, safe_name)
def commit_input_persist(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    commit_el = ET.Element("commit")
    # Generated-code convention: the initial <config> root is discarded and
    # replaced by the <commit> element.
    config = commit_el
    input_el = ET.SubElement(commit_el, "input")
    persist_el = ET.SubElement(input_el, "persist")
    persist_el.text = kwargs.pop('persist')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def accept_response(self, response_header, content=EmptyValue, content_type=EmptyValue,
                    accept_untrusted_content=False, localtime_offset_in_seconds=0,
                    timestamp_skew_in_seconds=default_ts_skew_in_seconds, **auth_kw):
    """Accept a response to this request.

    :param response_header: A `Hawk`_ ``Server-Authorization`` header,
        such as one created by :class:`mohawk.Receiver`.
    :type response_header: str
    :param content=EmptyValue: Byte string of the response body received.
    :param content_type=EmptyValue: Content-Type header value of the response.
    :param accept_untrusted_content=False: When True, allow responses that do
        not hash their content.
    :param localtime_offset_in_seconds=0: Seconds to add to local time in case
        it's out of sync.
    :param timestamp_skew_in_seconds=60: Max seconds until a message expires;
        :class:`mohawk.exc.TokenExpired` is raised on expiry.

    .. _`Hawk`: https://github.com/hueniverse/hawk
    """
    log.debug('accepting response {header}'.format(header=response_header))
    parsed_header = parse_authorization_header(response_header)
    request = self.req_resource
    # All attributes other than ext/content/content_type refer back to the
    # original request, not to the response header.
    resource = Resource(ext=parsed_header.get('ext', None),
                        content=content,
                        content_type=content_type,
                        timestamp=request.timestamp,
                        nonce=request.nonce,
                        url=request.url,
                        method=request.method,
                        app=request.app,
                        dlg=request.dlg,
                        credentials=self.credentials,
                        seen_nonce=self.seen_nonce)
    # Per the Node lib, a responder MACs the *sender's* timestamp; it does not
    # create its own, so validation runs against the request's timestamp.
    self._authorize('response', parsed_header, resource,
                    their_timestamp=resource.timestamp,
                    timestamp_skew_in_seconds=timestamp_skew_in_seconds,
                    localtime_offset_in_seconds=localtime_offset_in_seconds,
                    accept_untrusted_content=accept_untrusted_content,
                    **auth_kw)
def create_transition(self, from_state_id, from_outcome, to_state_id, to_outcome, transition_id):
    """Creates a new transition.

    Lookout: Check the parameters first before creating a new transition

    :param from_state_id: The source state of the transition
    :param from_outcome: The outcome of the source state to connect the transition to
    :param to_state_id: The target state of the transition
    :param to_outcome: The target outcome of a container state
    :param transition_id: An optional transition id for the new transition
    :raises exceptions.AttributeError: if the from or to state is incorrect
    :return: the id of the new transition
    """
    # get correct states
    if from_state_id is not None:
        if from_state_id == self.state_id:
            from_state = self
        else:
            from_state = self.states[from_state_id]
    # finally add transition
    if from_outcome is not None:
        if from_outcome in from_state.outcomes:
            if to_outcome is not None:
                if to_outcome in self.outcomes:
                    # if to_state is None then the to_outcome must be an outcome of self
                    self.transitions[transition_id] = Transition(
                        from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self)
                else:
                    # BUG FIX: apply %-formatting instead of passing a tuple to
                    # AttributeError, which left the message unformatted.
                    raise AttributeError("to_state does not have outcome %s" % to_outcome)
            else:
                # to outcome is None but to_state is not None, so the transition is valid
                self.transitions[transition_id] = Transition(
                    from_state_id, from_outcome, to_state_id, to_outcome, transition_id, self)
        else:
            # BUG FIX: report the missing outcome (not the state object) and
            # actually format the message.
            raise AttributeError("from_state does not have outcome %s" % from_outcome)
    else:
        # Start transition: no source state/outcome.
        self.transitions[transition_id] = Transition(
            None, None, to_state_id, to_outcome, transition_id, self)
    # notify all states waiting for transition to be connected
    self._transitions_cv.acquire()
    self._transitions_cv.notify_all()
    self._transitions_cv.release()
    return transition_id
def _classify_target_compile_workflow(self, target):
    """Return the compile workflow to use for this target."""
    has_jvm_sources = target.has_sources('.java') or target.has_sources('.scala')
    if not has_jvm_sources:
        return None
    return self.get_scalar_mirrored_target_option('workflow', target)
def _display_tooltip(self, tooltip, top):
    """Display tooltip at the specified top position."""
    # Anchor the tooltip at this widget's right edge, at the given height.
    anchor = QtCore.QPoint(self.sizeHint().width(), top)
    QtWidgets.QToolTip.showText(self.mapToGlobal(anchor), tooltip, self)
def add_widget(self, w):
    """Convenience function"""
    existing_layout = self.layout()
    if existing_layout:
        existing_layout.addWidget(w)
    else:
        # No layout yet: create a vertical one owned by this widget.
        new_layout = QVBoxLayout(self)
        new_layout.addWidget(w)
def boolean(input):
    """Convert the given input to a boolean value.

    Intelligently handles boolean and non-string values, returning
    as-is and passing to the bool builtin respectively.
    This process is case-insensitive.

    Acceptable True values: yes, on, true (and y/t/1).
    Acceptable False values: no, off, false (and n/f/0).

    :param input: the value to convert to a boolean
    :type input: any
    :returns: converted boolean value
    :rtype: bool
    """
    try:
        normalized = input.strip().lower()
    except AttributeError:
        # Non-string values fall through to the bool builtin.
        return bool(input)
    if normalized in ('yes', 'y', 'on', 'true', 't', '1'):
        return True
    if normalized in ('no', 'n', 'off', 'false', 'f', '0'):
        return False
    # Note: the original rebinds `input`, so the message shows the
    # stripped/lowered value — preserved here via `normalized`.
    raise ValueError("Unable to convert {0!r} to a boolean value.".format(normalized))
def should_ignore_rule(self, rule):
    """Determines whether a rule should be ignored based on the general list of commits to ignore"""
    ignored = self.config.ignore
    return rule.id in ignored or rule.name in ignored
def _add_matched_objects_to_database(self, matchedObjects):
    """*add matched objects to database*

    **Key Arguments:**
        - ``matchedObjects`` -- these objects matched in the neighbourhood of the ATLAS exposures (list of dictionaries)
    """
    self.log.info('starting the ``_add_matched_objects_to_database`` method')
    # BUG FIX: the original used a Python 2 print *statement*, which is a
    # syntax error under Python 3; the call form works under both.
    print("Adding the matched sources to the `pyephem_positions` database table")
    # Flatten the per-exposure lists into one list of row dictionaries.
    allMatches = []
    for m in matchedObjects:
        allMatches += m
    dbSettings = self.settings["database settings"]["atlasMovers"]
    insert_list_of_dictionaries_into_database_tables(
        dbConn=self.atlasMoversDBConn,
        log=self.log,
        dictList=allMatches,
        dbTableName="pyephem_positions",
        uniqueKeyList=["expname", "object_name"],
        dateModified=True,
        batchSize=10000,
        replace=True,
        dbSettings=dbSettings)
    self.log.info('completed the ``_add_matched_objects_to_database`` method')
    return None
def get_latlon(self, use_cached=True):
    """Get a tuple with device latitude and longitude... these may be None"""
    device_json = self.get_device_json(use_cached)

    def _to_float(raw):
        # Empty string / None map to None rather than raising.
        return float(raw) if raw else None

    return (_to_float(device_json.get("dpMapLat")),
            _to_float(device_json.get("dpMapLong")))
def get_temp_and_dew(wxdata: str) -> ([str], Number, Number):  # type: ignore
    """Returns the report list and removed temperature and dewpoint strings"""
    # Scan from the end, where temp/dew groups normally live in a report.
    for i, item in reversed(list(enumerate(wxdata))):
        if '/' not in item:
            continue
        # Collapse runs of slashes: ///07 -> /07 and 07/// -> 07/
        if item[0] == '/':
            item = '/' + item.lstrip('/')
        elif item[-1] == '/':
            item = item.rstrip('/') + '/'
        halves = item.split('/')
        if len(halves) != 2:
            continue
        valid = True
        for j, temp in enumerate(halves):
            if temp in ['MM', 'XX']:
                # Missing-value markers become empty strings.
                halves[j] = ''
            elif not is_possible_temp(temp):
                valid = False
                break
        if valid:
            wxdata.pop(i)
            return (wxdata, *[make_number(t) for t in halves])  # type: ignore
    return wxdata, None, None
def get_or_create(cls, *props, **kwargs):
    """Call to MERGE with parameters map. A new instance will be created and
    saved if it does not already exist; this is an atomic operation.

    Parameters must contain all required properties; any non-required
    properties with defaults will be generated. Note that the post_create
    hook isn't called after get_or_create.

    :param props: dicts of properties to get or create the entities with.
    :type props: tuple
    :param relationship: Optional, relationship to get/create on when a new
        entity is created.
    :param lazy: False by default; specify True to get nodes with id only,
        without the parameters.
    :rtype: list
    """
    lazy = kwargs.get('lazy', False)
    relationship = kwargs.get('relationship')
    # build merge query
    merge_params = [{"create": cls.deflate(p, skip_empty=True)} for p in props]
    query, params = cls._build_merge_query(merge_params,
                                           relationship=relationship, lazy=lazy)
    if 'streaming' in kwargs:
        warnings.warn('streaming is not supported by bolt, please remove the kwarg',
                      category=DeprecationWarning, stacklevel=1)
    # fetch and build instance for each result
    results = db.cypher_query(query, params)
    return [cls.inflate(row[0]) for row in results[0]]
def install(ctx, services, delete_after_install=False):
    """Install a honeypot service from the online library, local path or zipfile."""
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
                 extra={"command": ctx.command.name, "params": ctx.params})
    home = ctx.obj["HOME"]
    services_path = os.path.join(home, SERVICES)
    any_failed = False
    for service in services:
        try:
            plugin_utils.install_plugin(service, SERVICE, services_path, register_service)
        except exceptions.PluginAlreadyInstalled as exc:
            click.echo(exc)
            any_failed = True
    if any_failed:
        # Exit with EEXIST when at least one plugin was already installed.
        raise ctx.exit(errno.EEXIST)
def setdefault(elt, key, default, ctx=None):
    """Get a local property, creating it with a default value if the local
    property does not exist.

    :param elt: local property elt to get/create. Not None methods.
    :param str key: property name.
    :param default: property value to set if key is not in local properties.
    :return: property value, or default if the property did not exist.
    """
    # get the best context
    if ctx is None:
        ctx = find_ctx(elt=elt)
    # get elt properties
    properties = _ctx_elt_properties(elt=elt, ctx=ctx, create=True)
    if key in properties:
        return properties[key]
    # set default property value
    properties[key] = default
    return default
def _dump_impl():
    # type: () -> List[FunctionData]
    """Internal implementation for dump_stats and dumps_stats"""
    filtered = _filter_types(collected_signatures)
    # Deterministic order: by file path, then line, then function name.
    ordered = sorted(iteritems(filtered),
                     key=lambda pair: (pair[0].path, pair[0].line, pair[0].func_name))
    entries = []  # type: List[FunctionData]
    for function_key, signatures in ordered:
        comments = [_make_type_comment(args, ret_type) for args, ret_type in signatures]
        entries.append({
            'path': function_key.path,
            'line': function_key.line,
            'func_name': function_key.func_name,
            'type_comments': comments,
            'samples': num_samples.get(function_key, 0),
        })
    return entries
def evergreen(self, included_channel_ids=None, excluded_channel_ids=None, **kwargs):
    """Search containing any evergreen piece of Content.

    :param included_channel_ids: list of channel ids relevant to the query.
    :param excluded_channel_ids: list of channel ids excluded from the query.
    """
    query = self.search(**kwargs).filter(Evergreen())
    if included_channel_ids:
        query = query.filter(VideohubChannel(included_ids=included_channel_ids))
    if excluded_channel_ids:
        query = query.filter(VideohubChannel(excluded_ids=excluded_channel_ids))
    return query
def grow_slice(slc, size):
    """Grow a slice object by 1 in each direction without overreaching the list.

    Parameters
    ----------
    slc : slice
        Slice object to grow.
    size : int
        Length of the underlying list.

    Returns
    -------
    slice
        Slice extended by one on each side, clamped to [0, size].
    """
    lower = slc.start - 1
    upper = slc.stop + 1
    # Clamp both ends so the slice stays inside the list bounds.
    if lower < 0:
        lower = 0
    if upper > size:
        upper = size
    return slice(lower, upper)
def get_default_renderer(self, view):
    """Return an instance of the first valid renderer.

    Documenting renderers are skipped; renderers without templates are
    preferred over template-based ones.
    """
    candidates = []
    for renderer in view.renderer_classes:
        # Don't use another documenting renderer.
        if issubclass(renderer, BrowsableAPIRenderer):
            continue
        candidates.append(renderer)
    if not candidates:
        return None
    # Prefer the first renderer that does not rely on templates.
    for renderer in candidates:
        if not hasattr(renderer, 'get_template_names'):
            return renderer()
    return candidates[0]()
def from_string(string: str):
    """Create a new Spec object from a given string.

    :param string: The contents of a spec file.
    :return: A new Spec object.
    """
    spec, parse_context = Spec(), {"current_subpackage": None}
    # Feed the file line by line through the parser, threading state along.
    for raw_line in string.splitlines():
        spec, parse_context = _parse(spec, parse_context, raw_line)
    return spec
def _get_dst_dir(dst_dir):
    """Create ``dst_dir`` and return it prefixed with the working directory.

    :param dst_dir: A string to be prefixed with the working dir.
    :return: str
    """
    # Ensure the destination exists before handing the path back.
    _makedirs(dst_dir)
    return os.path.join(os.getcwd(), dst_dir)
def from_template(args):
    """Create a new oct project from an existing template archive.

    :param Namespace args: command line arguments (``name`` and ``template``).
    """
    with tarfile.open(args.template) as tar:
        names = tar.getnames()
        prefix = os.path.commonprefix(names)
        # Validate the archive layout before extracting anything.
        check_template(names, prefix)
        tar.extractall(args.name, members=get_members(tar, prefix))
def destroy_decompress(dinfo):
    """Wrap the openjpeg library function opj_destroy_decompress."""
    # Declare the argument signature before invoking the C function.
    OPENJPEG.opj_destroy_decompress.argtypes = [
        ctypes.POINTER(DecompressionInfoType),
    ]
    OPENJPEG.opj_destroy_decompress(dinfo)
def clean(self, text, guess=True, format=None, **kwargs):
    """The classic: date parsing, every which way.

    Tries, in order: native date/datetime objects, an explicit strptime
    ``format``, fuzzy guessing, then plain text cleaning.
    """
    # Handle date/datetime inputs before converting to text.
    date = self._clean_datetime(text)
    if date is not None:
        return date
    text = stringify(text)
    if text is None:
        return
    if format is not None:
        # Parse with the caller-specified strptime format.
        try:
            obj = datetime.strptime(text, format)
            return obj.date().isoformat()
        except Exception:
            # Format did not match: treat the value as unparseable.
            return None
    if guess and not self.validate(text):
        # Use dateparser to guess the format.
        obj = self.fuzzy_date_parser(text)
        if obj is not None:
            return obj.date().isoformat()
    return self._clean_text(text)
def list_machine_group(self, project_name, offset=0, size=100):
    """List machine group names in a project.

    An unsuccessful operation raises a LogException.

    :type project_name: string
    :param project_name: the Project name
    :type offset: int
    :param offset: offset into the full list of group names
    :type size: int
    :param size: the max return names count; -1 means all
    :return: ListMachineGroupResponse
    :raise: LogException
    """
    # Delegate to the extended pager when the caller wants more than one
    # server-side page (or everything, via size == -1).
    if int(size) == -1 or int(size) > MAX_LIST_PAGING_SIZE:
        return list_more(self.list_machine_group, int(offset), int(size), MAX_LIST_PAGING_SIZE, project_name)
    headers = {}
    params = {}
    resource = "/machinegroups"
    # The API expects paging arguments as strings.
    params['offset'] = str(offset)
    params['size'] = str(size)
    (resp, header) = self._send("GET", project_name, None, resource, params, headers)
    return ListMachineGroupResponse(resp, header)
def filter_ordered_statistics(ordered_statistics, **kwargs):
    """Filter OrderedStatistic objects by confidence and lift thresholds.

    Arguments:
        ordered_statistics -- an OrderedStatistic iterable object.

    Keyword arguments:
        min_confidence -- the minimum confidence of relations (float).
        min_lift -- the minimum lift of relations (float).
    """
    min_confidence = kwargs.get('min_confidence', 0.0)
    min_lift = kwargs.get('min_lift', 0.0)
    # Yield only statistics that meet both thresholds.
    for stat in ordered_statistics:
        if stat.confidence >= min_confidence and stat.lift >= min_lift:
            yield stat
def _root ( self ) :
"""Attribute referencing the root node of the tree .
: returns : the root node of the tree containing this instance .
: rtype : Node""" | _n = self
while _n . parent :
_n = _n . parent
return _n |
def from_coeff(self, chebcoeff, domain=None, prune=True, vscale=1.):
    """Initialise from provided coefficients.

    prune: whether to drop the negligible trailing coefficients.
    vscale: the scale to use when pruning.
    """
    coeffs = np.asarray(chebcoeff)
    if prune:
        # Truncate at the cutoff index determined by vscale.
        coeffs = coeffs[:self._cutoff(coeffs, vscale)]
    values = self.polyval(coeffs)
    return self(values, domain, vscale)
def get_function_in_models(service, operation):
    """Refer to the botocore API definition and autogenerate a method stub.

    See for example the elbv2 definition:
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json

    :param service: boto3 service name (e.g. 'elbv2').
    :param operation: snake_case operation name.
    :return: str -- source code of a backend method stub.
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    inputs = op_model.input_shape.members
    # Operations without an output shape have no members attribute.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys()
                   if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys()
                    if _ not in OUTPUT_IGNORED_IN_BACKEND]
    if input_names:
        body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
    else:
        # BUG FIX: previously emitted the literal "def {}(self)\n" --
        # missing both the trailing ':' and the .format(operation) call,
        # producing an invalid stub for no-argument operations.
        body = 'def {}(self):\n'.format(operation)
    body += '    # implement here\n'
    body += '    return {}\n\n'.format(', '.join(output_names))
    return body
def _relative_to_abs_sls ( relative , sls ) :
'''Convert ` ` relative ` ` sls reference into absolute , relative to ` ` sls ` ` .''' | levels , suffix = re . match ( r'^(\.+)(.*)$' , relative ) . groups ( )
level_count = len ( levels )
p_comps = sls . split ( '.' )
if level_count > len ( p_comps ) :
raise SaltRenderError ( 'Attempted relative include goes beyond top level package' )
return '.' . join ( p_comps [ : - level_count ] + [ suffix ] ) |
def is_same(type1, type2):
    """Return True if type1 and type2 are the same type once declarations
    are stripped."""
    return remove_declarated(type1) == remove_declarated(type2)
def monmap(cluster, hostname):
    """Return the temporary monmap path for a cluster/host pair.

    Example usage::

        >>> from ceph_deploy.util.paths import mon
        >>> mon.monmap('mycluster', 'myhostname')
        /var/lib/ceph/tmp/mycluster.myhostname.monmap
    """
    # BUG FIX: removed a stray no-op ``monmap`` expression statement that
    # merely evaluated the function object itself and discarded it.
    mon_map_file = '%s.%s.monmap' % (cluster, hostname)
    return join(constants.tmp_path, mon_map_file)
def _get_pk_from_identity(obj):
    """Copied/pasted, and fixed, from WTForms_sqlalchemy due to an issue
    with SQLAlchemy >= 1.2."""
    from sqlalchemy.orm.util import identity_key
    # identity_key returns (class, key-tuple, ...); keep only the first two.
    cls, key = identity_key(instance=obj)[:2]
    return ":".join(text_type(part) for part in key)
def min(self, spec):
    """Adds ``min`` operator that specifies lower bound for specific index.

    :Parameters:
      - `spec`: a list of field, limit pairs specifying the inclusive
        lower bound for all keys of a specific index in order.

    .. versionadded:: 2.7
    """
    if not isinstance(spec, (list, tuple)):
        raise TypeError("spec must be an instance of list or tuple")
    # Chaining is only legal while the cursor has not been used yet.
    self.__check_okay_to_chain()
    # SON preserves key order, which matters for multi-key index bounds.
    self.__min = SON(spec)
    return self
def _set_auth_type(self, v, load=False):
    """Setter method for auth_type, mapped from YANG variable
    /routing_system/interface/ve/ipv6/ipv6_vrrp_extended/auth_type (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_auth_type is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_auth_type() directly.

    YANG Description: Authentication type
    """
    # Normalise the value through its declared union type first, if any.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG container class.
        t = YANGDynClass(v, base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a descriptive error including the expected generated type.
        raise ValueError({'error-string': """auth_type must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=auth_type.auth_type, is_container='container', presence=False, yang_name="auth-type", rest_name="auth-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Authentication type'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""", })
    self.__auth_type = t
    # Notify the parent object of the change when supported.
    if hasattr(self, '_set'):
        self._set()
def tablespace_list(user=None, host=None, port=None, maintenance_db=None, password=None, runas=None):
    '''Return dictionary with information about tablespaces of a Postgres
    server.

    CLI Example:

    .. code-block:: bash

        salt '*' postgres.tablespace_list

    .. versionadded:: 2015.8.0
    '''
    ret = {}
    # Join pg_tablespace with pg_roles to resolve each owner's role name.
    query = ('SELECT spcname as "Name", pga.rolname as "Owner", spcacl as "ACL", ' 'spcoptions as "Opts", pg_tablespace_location(pgts.oid) as "Location" ' 'FROM pg_tablespace pgts, pg_roles pga WHERE pga.oid = pgts.spcowner')
    rows = __salt__['postgres.psql_query'](query, runas=runas, host=host, user=user, port=port, maintenance_db=maintenance_db, password=password)
    # Key the result by tablespace name and drop the redundant Name column.
    for row in rows:
        ret[row['Name']] = row
        ret[row['Name']].pop('Name')
    return ret
def dist_points(bin_edges, d):
    """Return an array of values according to a distribution.

    Points are calculated at the center of each bin.
    """
    centers = bin_centers(bin_edges)
    if d is not None:
        # Look up the distribution builder by its declared type key.
        d = DISTS[d['type']](d, centers)
    return d, centers
def update(self):
    """Update hook: cache analyses keyed by service UID and preselect all."""
    super(AnalysisRequestAnalysesView, self).update()
    analyses = self.context.getAnalyses(full_objects=True)
    # Map service UID -> analysis object for quick lookup later.
    self.analyses = dict([(a.getServiceUID(), a) for a in analyses])
    self.selected = self.analyses.keys()
def invalidation_hash(self, fingerprint_strategy=None):
    """:API: public

    Memoised invalidation hash for the given fingerprint strategy.
    """
    strategy = fingerprint_strategy or DefaultFingerprintStrategy()
    try:
        return self._cached_fingerprint_map[strategy]
    except KeyError:
        # First request for this strategy: compute and cache the hash.
        digest = self.compute_invalidation_hash(strategy)
        self._cached_fingerprint_map[strategy] = digest
        return digest
def clear_worker_output(self):
    """Drops all of the worker output collections.

    Args:
        None
    Returns:
        Nothing
    """
    self.data_store.clear_worker_output()
    # Have the plugin manager reload all the plugins.
    self.plugin_manager.load_all_plugins()
    # Store information about commands and workbench.
    self._store_information()
async def get_real_ext_ip(self):
    """Return real external IP address.

    Tries each configured IP-echo host (in random order) until one
    returns a valid IP; raises RuntimeError when every host fails.
    """
    while self._ip_hosts:
        try:
            timeout = aiohttp.ClientTimeout(total=self._timeout)
            async with aiohttp.ClientSession(timeout=timeout, loop=self._loop) as session, session.get(self._pop_random_ip_host()) as resp:
                ip = await resp.text()
        except asyncio.TimeoutError:
            # Host too slow: fall through and try the next one.
            pass
        else:
            ip = ip.strip()
            if self.host_is_ip(ip):
                log.debug('Real external IP: %s', ip)
                break
    else:
        # while/else: the loop exhausted every host without a break.
        raise RuntimeError('Could not get the external IP')
    return ip
def get_organization(self, organization_id):
    """Get an organization for a given organization ID.

    :param organization_id: str
    :return: Organization
    """
    endpoint = 'rest/servicedeskapi/organization/{}'.format(organization_id)
    return self.get(endpoint, headers=self.experimental_headers)
def collate_fonts_data(fonts_data):
    """Collate individual fonts' data into a single glyph data list.

    Glyphs sharing a codepoint are merged by combining their ``contours``
    values with the ``|`` operator.
    """
    merged = {}
    for family in fonts_data:
        for glyph in family:
            code = glyph['unicode']
            if code in merged:
                # Duplicate codepoint: fold this glyph's contours in.
                merged[code]['contours'] |= glyph['contours']
            else:
                merged[code] = glyph
    return merged.values()
def _write(self, frame):
    """Write a YubiKeyFrame to the USB HID.

    Includes polling for YubiKey readiness before each write.
    """
    for data in frame.to_feature_reports(debug=self.debug):
        debug_str = None
        if self.debug:
            # In debug mode each report is a (data, debug_str) pair.
            (data, debug_str) = data
        # First, we ensure the YubiKey will accept a write.
        self._waitfor_clear(yubikey_defs.SLOT_WRITE_FLAG)
        self._raw_write(data, debug_str)
    return True
def str_to_datetime(ts):
    """Format a string to a datetime object.

    This function supports several date formats like YYYY-MM-DD,
    MM-DD-YYYY, YY-MM-DD, YYYY-MM-DD HH:mm:SS +HH:MM, among others.
    When the timezone is not provided, UTC+0 will be set as default
    (using `dateutil.tz.tzutc` object).

    :param ts: string to convert
    :returns: a datetime object
    :raises InvalidDateError: when the given string cannot be converted
        into a valid date
    """
    def parse_datetime(ts):
        # Parse and force UTC when no timezone information is present.
        dt = dateutil.parser.parse(ts)
        if not dt.tzinfo:
            dt = dt.replace(tzinfo=dateutil.tz.tzutc())
        return dt
    if not ts:
        raise InvalidDateError(date=str(ts))
    try:
        # Try to remove additional information after the
        # timezone section because it cannot be parsed,
        # like in 'Wed, 26 Oct 2005 15:20:32 -0100 (GMT+1)'
        # or in 'Thu, 14 Aug 2008 02:07:59 +0200 CEST'.
        m = re.search(r"^.+?\s+[\+\-\d]\d{4}(\s+.+)$", ts)
        if m:
            ts = ts[:m.start(1)]
        try:
            dt = parse_datetime(ts)
        except ValueError as e:
            # Try to remove the timezone, usually it causes
            # problems.
            m = re.search(r"^(.+?)\s+[\+\-\d]\d{4}.*$", ts)
            if m:
                dt = parse_datetime(m.group(1))
                logger.warning("Date %s str does not have a valid timezone", ts)
                logger.warning("Date converted removing timezone info")
                return dt
            raise e
        return dt
    except ValueError as e:
        # Anything still unparseable maps onto the library's own error.
        raise InvalidDateError(date=str(ts))
def construct_stone_pile(levels: int) -> list:
    """Construct a pyramid of stones where the count on each level follows
    a fixed rule.

    Each level holds two more stones than the previous one, starting from
    ``levels`` stones on the first level.

    Args:
        levels: A positive integer representing the total number of levels.

    Returns:
        A list of integers where each integer is the number of stones on
        that level (index of list + 1).

    Example:
        >>> construct_stone_pile(3)
        [3, 5, 7]
    """
    pile = []
    stones = levels
    for _ in range(levels):
        pile.append(stones)
        stones += 2
    return pile
def forwards(self, orm):
    "Write your forwards methods here."
    # Permission codename -> human-readable permission name.
    PERM_CONF = {"publish_content": "Can publish content", "publish_own_content": "Can publish own content", "change_content": "Can change content", "promote_content": "Can promote content"}
    # Group name -> permission codenames granted to that group.
    GROUP_CONF = dict(contributor=(), author=("publish_own_content",), editor=("publish_content", "change_content", "promote_content",), admin=("publish_content", "change_content", "promote_content",))
    content_ct, _ = orm["contenttypes.ContentType"].objects.get_or_create(model="content", app_label="content")
    # Ensure each group exists and carries its configured permissions.
    for group_name, group_perms in GROUP_CONF.items():
        group, _ = orm["auth.Group"].objects.get_or_create(name=group_name)
        for perm_name in group_perms:
            perm, _ = orm["auth.Permission"].objects.get_or_create(content_type=content_ct, codename=perm_name, defaults={"name": PERM_CONF[perm_name]})
            group.permissions.add(perm)
def is_valid_interval(self, lower, upper):
    """Return False if [lower:upper] is not a valid subitems interval.

    If it is, return a tuple of (lower index, upper index).
    """
    try:
        lo = self.data.index(lower)
        hi = self.data.index(upper)
    except ValueError:
        # One of the endpoints is not present in the data.
        return False
    if lo > hi:
        return False
    return (lo, hi)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # Fall back to the module dict for lazily created attributes.
        if name not in moves.__dict__:
            raise AttributeError("no such move, %r" % (name,))
        del moves.__dict__[name]
def logout(self):
    """Log out of the account by clearing all cached session state."""
    # Drop every credential so the client must re-authenticate.
    for attr in ('_master_token', '_auth_token', '_email', '_android_id'):
        setattr(self, attr, None)
def publish_workflow_submission(self, user_id, workflow_id_or_name, parameters):
    """Publish workflow submission parameters."""
    payload = {
        "user": user_id,
        "workflow_id_or_name": workflow_id_or_name,
        "parameters": parameters,
    }
    self._publish(payload)
def transform_returns(self, raw_lines, tre_return_grammar=None, use_mock=None, is_async=False):
    """Apply TCO, TRE, or async universalization to the given function.

    :param raw_lines: the function body's source lines.
    :param tre_return_grammar: grammar used to detect tail-recursive
        returns; None disables TRE.
    :param use_mock: not referenced in this body; kept for interface
        compatibility with callers.
    :param is_async: whether the function is an async function.
    :return: the transformed code string when async, otherwise a tuple of
        (code string, tco done, tre done).
    """
    lines = []  # transformed lines
    tco = False  # whether tco was done
    tre = False  # whether tre was done
    level = 0  # indentation level
    disabled_until_level = None  # whether inside of a disabled block
    attempt_tre = tre_return_grammar is not None  # whether to even attempt tre
    attempt_tco = not is_async and not self.no_tco  # whether to even attempt tco
    if is_async:
        internal_assert(not attempt_tre and not attempt_tco, "cannot tail call optimize async functions")
    for line in raw_lines:
        indent, body, dedent = split_leading_trailing_indent(line)
        base, comment = split_comment(body)
        level += ind_change(indent)
        # Re-enable transformations once we dedent out of a disabled block.
        if disabled_until_level is not None:
            if level <= disabled_until_level:
                disabled_until_level = None
        if disabled_until_level is None:
            # tco and tre don't support generators
            if not is_async and self.yield_regex.search(body):
                lines = raw_lines  # reset lines
                break
            # don't touch inner functions
            elif self.def_regex.match(body):
                disabled_until_level = level
            # tco and tre shouldn't touch scopes that depend on actual return statements
            # or scopes where we can't insert a continue
            elif not is_async and self.tre_disable_regex.match(body):
                disabled_until_level = level
            else:
                if is_async:
                    # Universalize async returns into raise Return(...)
                    if self.return_regex.match(base):
                        to_return = base[len("return"):].strip()
                        if to_return:  # leave empty return statements alone
                            line = indent + "raise _coconut.asyncio.Return(" + to_return + ")" + comment + dedent
                tre_base = None
                if attempt_tre:
                    with self.complain_on_err():
                        tre_base = transform(tre_return_grammar, base)
                    if tre_base is not None:
                        line = indent + tre_base + comment + dedent
                        tre = True
                        # when tco is available, tre falls back on it if the function is changed
                        tco = not self.no_tco
                if attempt_tco and tre_base is None:  # don't attempt tco if tre succeeded
                    tco_base = None
                    with self.complain_on_err():
                        tco_base = transform(self.tco_return, base)
                    if tco_base is not None:
                        line = indent + tco_base + comment + dedent
                        tco = True
        level += ind_change(dedent)
        lines.append(line)
    func_code = "".join(lines)
    if is_async:
        return func_code
    else:
        return func_code, tco, tre
def predefinedEntity(name):
    """Check whether this name is a predefined entity."""
    raw = libxml2mod.xmlGetPredefinedEntity(name)
    if raw is None:
        raise treeError('xmlGetPredefinedEntity() failed')
    return xmlEntity(_obj=raw)
def load(source, **kwargs) -> JsonObj:
    """Deserialize a JSON source.

    :param source: a URI, file name or a .read()-supporting file-like
        object containing a JSON document.
    :param kwargs: arguments; see json.load for details.
    :return: JsonObj representing the parsed document.
    """
    if isinstance(source, str):
        if '://' in source:
            # A URI: fetch over the network, preferring JSON responses.
            req = Request(source)
            req.add_header("Accept", "application/json, text/json;q=0.9")
            with urlopen(req) as response:
                raw_json = response.read()
        else:
            # A plain file name on disk.
            with open(source) as f:
                raw_json = f.read()
    elif hasattr(source, "read"):
        raw_json = source.read()
    else:
        raise TypeError("Unexpected type {} for source {}".format(type(source), source))
    return loads(raw_json, **kwargs)
def untlpy2dcpy(untl_elements, **kwargs):
    """Convert the UNTL elements structure into a DC structure.

    kwargs can be passed to the function for certain effects:

    ark: Takes an ark string and creates an identifier element out of it.
    domain_name: Takes a domain string and creates an ark URL from it
        (ark and domain_name must be passed together to work properly).
    resolve_values: Converts abbreviated content into resolved vocabulary
        labels.
    resolve_urls: Converts abbreviated content into resolved vocabulary
        URLs.
    verbose_vocabularies: Uses the verbose vocabularies passed to the
        function instead of this function being required to retrieve them.

    # Create a DC Python object from a UNTL XML file.
    from pyuntl.untldoc import untlxml2py
    untl_elements = untlxml2py(untl_filename)  # Or pass a file-like object.
    # OR create a DC Python object from a UNTL dictionary.
    from pyuntl.untldoc import untldict2py
    untl_elements = untldict2py(untl_dict)
    # Convert the UNTL Python object to a DC Python object.
    dc_elements = untlpy2dcpy(untl_elements)
    dc_dict = dcpy2dict(dc_elements)
    # Output DC in a specified string format.
    from pyuntl.untldoc import generate_dc_xml, generate_dc_json, generate_dc_txt
    generate_dc_xml(dc_dict)   # DC XML string.
    generate_dc_json(dc_dict)  # DC JSON string.
    generate_dc_txt(dc_dict)   # DC text string.
    """
    # Coverage start/end dates are held back so they can be merged at the end.
    sDate = None
    eDate = None
    ark = kwargs.get('ark', None)
    domain_name = kwargs.get('domain_name', None)
    scheme = kwargs.get('scheme', 'http')
    resolve_values = kwargs.get('resolve_values', None)
    resolve_urls = kwargs.get('resolve_urls', None)
    verbose_vocabularies = kwargs.get('verbose_vocabularies', None)
    # If either resolver was requested, get the vocabulary data.
    if resolve_values or resolve_urls:
        if verbose_vocabularies:
            # If the vocabularies were passed to the function, use them.
            vocab_data = verbose_vocabularies
        else:
            # Otherwise, retrieve them using the pyuntl method.
            vocab_data = retrieve_vocab()
    else:
        vocab_data = None
    # Create the DC parent element.
    dc_root = DC_CONVERSION_DISPATCH['dc']()
    for element in untl_elements.children:
        # Check if the UNTL element should be converted to DC.
        if element.tag in DC_CONVERSION_DISPATCH:
            # Check if the element has its content stored in children nodes.
            if element.children:
                dc_element = DC_CONVERSION_DISPATCH[element.tag](qualifier=element.qualifier, children=element.children, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, )
            # It is a normal element.
            else:
                dc_element = DC_CONVERSION_DISPATCH[element.tag](qualifier=element.qualifier, content=element.content, resolve_values=resolve_values, resolve_urls=resolve_urls, vocab_data=vocab_data, )
            if element.tag == 'coverage':
                # Handle start and end dates separately so they can be merged.
                if element.qualifier == 'sDate':
                    sDate = dc_element
                elif element.qualifier == 'eDate':
                    eDate = dc_element
                # Otherwise, add the coverage element to the structure.
                else:
                    dc_root.add_child(dc_element)
            # Add non-coverage DC element to the structure.
            elif dc_element:
                dc_root.add_child(dc_element)
    # If the domain and ark were specified,
    # try to turn them into identifier elements.
    if ark and domain_name:
        # Create and add the permalink identifier.
        permalink_identifier = DC_CONVERSION_DISPATCH['identifier'](qualifier='permalink', domain_name=domain_name, ark=ark, scheme=scheme)
        dc_root.add_child(permalink_identifier)
        # Create and add the ark identifier.
        ark_identifier = DC_CONVERSION_DISPATCH['identifier'](qualifier='ark', content=ark, )
        dc_root.add_child(ark_identifier)
    if sDate and eDate:
        # If a start and end date exist, combine them into one element.
        dc_element = DC_CONVERSION_DISPATCH['coverage'](content='%s-%s' % (sDate.content, eDate.content), )
        dc_root.add_child(dc_element)
    elif sDate:
        dc_root.add_child(sDate)
    elif eDate:
        dc_root.add_child(eDate)
    return dc_root
def getCell(self, row, width=None):
    'Return DisplayWrapper for displayable cell value.'
    # Compute the raw value, then apply the column type; either may fail,
    # in which case wrapply returns a wrapper instead of raising.
    cellval = wrapply(self.getValue, row)
    typedval = wrapply(self.type, cellval)
    if isinstance(typedval, TypedWrapper):
        if isinstance(cellval, TypedExceptionWrapper):  # calc failed
            exc = cellval.exception
            if cellval.forwarded:
                dispval = str(cellval)  # traceback.format_exception_only(type(exc), exc)[-1].strip()
            else:
                dispval = options.disp_error_val
            return DisplayWrapper(cellval.val, error=exc.stacktrace, display=dispval, note=options.note_getter_exc, notecolor='color_error')
        elif typedval.val is None:  # early out for strict None
            return DisplayWrapper(None, display='',  # force empty display for None
                                  note=options.disp_note_none, notecolor='color_note_type')
        elif isinstance(typedval, TypedExceptionWrapper):  # calc succeeded, type failed
            return DisplayWrapper(typedval.val, display=str(cellval), error=typedval.exception.stacktrace, note=options.note_type_exc, notecolor='color_warning')
        else:
            return DisplayWrapper(typedval.val, display=str(typedval.val), note=options.note_type_exc, notecolor='color_warning')
    elif isinstance(typedval, threading.Thread):
        # Value is still being computed asynchronously.
        return DisplayWrapper(None, display=options.disp_pending, note=options.note_pending, notecolor='color_note_pending')
    dw = DisplayWrapper(cellval)
    try:
        dw.display = self.format(typedval) or ''
        # Right-justify numeric columns within the given width.
        if width and isNumeric(self):
            dw.display = dw.display.rjust(width - 1)
        # Annotate cells with raw value type in anytype columns, except for strings.
        if self.type is anytype and type(cellval) is not str:
            typedesc = typemap.get(type(cellval), None)
            dw.note = typedesc.icon if typedesc else options.note_unknown_type
            dw.notecolor = 'color_note_type'
    except Exception as e:  # formatting failure
        e.stacktrace = stacktrace()
        dw.error = e
        try:
            dw.display = str(cellval)
        except Exception as e:
            # Even str() failed: show the error itself.
            dw.display = str(e)
        dw.note = options.note_format_exc
        dw.notecolor = 'color_warning'
    return dw
def shutdown(self):
    """Shut down the publisher, flushing any queued messages first.

    Safe to call more than once: repeat calls only cancel the timer
    and return.
    """
    self.debug_log('shutdown - start')
    # Only initiate shutdown once.
    if self.shutdown_now:
        # BUG FIX: the guard was inverted (`if not self.shutdown_now`),
        # which made the very first call log "still shutting down" and
        # return without ever performing the shutdown work.
        self.debug_log('shutdown - still shutting down')
        # Cancels the scheduled Timer, allows exit immediately.
        if self.timer:
            self.timer.cancel()
            self.timer = None
        return
    self.debug_log('shutdown - start - setting instance shutdown')
    self.shutdown_now = True
    self.shutdown_event.set()
    # Cancels the scheduled Timer, allows exit immediately.
    # BUG FIX: guard against timer being None before cancelling.
    if self.timer:
        self.timer.cancel()
        self.timer = None
    self.debug_log('shutdown - publishing remaining logs')
    if self.sleep_interval > 0:
        try:
            self.build_payload_from_queued_messages(
                use_queue=self.queue,
                shutdown_event=self.shutdown_event,
                triggered_by_shutdown=True)
        except Exception as e:
            self.write_log(
                ('shutdown - failed to build a payload for remaining '
                 'messages in queue Exception shutting down '
                 'with ex={}').format(e))
    self.debug_log('publishing remaining logs')
    # Send the remaining items in the queue.
    self.publish_to_splunk()
    self.debug_log('shutdown - done')
def reload_component(self, name):
    """Reloads given Component.

    :param name: Component name.
    :type name: unicode
    :return: Method success.
    :rtype: bool
    """
    if not name in self.__engine.components_manager.components:
        raise manager.exceptions.ComponentExistsError("{0} | '{1}' Component isn't registered in the Components Manager!".format(self.__class__.__name__, name))
    component = self.__engine.components_manager.components[name]
    LOGGER.debug("> Attempting '{0}' Component reload.".format(component.name))
    if component.interface.deactivatable:
        # Deactivate dependents (deepest last in this reversed list),
        # reload the component, then re-activate in reverse order.
        dependents = list(reversed(self.__engine.components_manager.list_dependents(component.name)))
        # NOTE(review): under Python 3 `filter(...)` returns an iterator,
        # which is always truthy; this check relies on Python 2 semantics
        # where filter returns a list -- confirm the target interpreter.
        if filter(lambda x: not self.__engine.components_manager[x].deactivatable, dependents):
            LOGGER.warning("!> {0} | '{1}' Component has non reloadable dependencies and won't be reloaded!".format(self.__class__.__name__, component.name))
            return False
        LOGGER.info("{0} | '{1}' Component dependents: '{2}'.".format(self.__class__.__name__, component.name, ", ".join(dependents)))
        LOGGER.debug("> Deactivating '{0}' Component dependents.".format(component.name))
        # The component itself is deactivated after its dependents.
        dependents.append(component.name)
        for dependent in dependents:
            if self.__engine.components_manager[dependent].activated:
                self.deactivate_component(dependent)
            self.__engine.process_events()
        LOGGER.debug("> Reloading '{0}' Component dependents.".format(component.name))
        self.__engine.components_manager.reload_component(component.name)
        LOGGER.debug("> Activating '{0}' Component dependents.".format(component.name))
        for dependent in reversed(dependents):
            if not self.__engine.components_manager[dependent].activated:
                self.activate_component(dependent)
            self.__engine.process_events()
        LOGGER.info("{0} | '{1}' Component has been reloaded!".format(self.__class__.__name__, component.name))
        self.reloaded_component.emit(component.name)
        return True
    else:
        raise manager.exceptions.ComponentReloadError("{0} | '{1}' Component cannot be deactivated and won't be reloaded!".format(self.__class__.__name__, component.name))
def update(self, updatePortalParameters, clearEmptyFields=False):
    """The Update operation allows administrators only to update the
    organization information such as name, description, thumbnail, and
    featured groups.

    Inputs:
        updatePortalParameters - parameters.PortalParameters object that
            holds the information to update; a plain dict is also accepted.
        clearEmptyFields - boolean that clears all whitespace from fields.
    """
    url = self.root + "/update"
    params = {"f": "json", "clearEmptyFields": clearEmptyFields}
    if isinstance(updatePortalParameters, parameters.PortalParameters):
        params.update(updatePortalParameters.value)
    elif isinstance(updatePortalParameters, dict):
        # Plain dicts are merged key-by-key into the request parameters.
        for k, v in updatePortalParameters.items():
            params[k] = v
    else:
        raise AttributeError("updatePortalParameters must be of type parameter.PortalParameters")
    return self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
def spop(self, key, count=None, *, encoding=_NOTSET):
    """Remove and return one or multiple random members from a set."""
    # SPOP takes an optional count argument after the key.
    command_args = [key] if count is None else [key, count]
    return self.execute(b'SPOP', *command_args, encoding=encoding)
def create_reaction(self, reaction_type):
    """:calls: `POST /repos/:owner/:repo/issues/:number/reactions <https://developer.github.com/v3/reactions>`_

    :param reaction_type: string
    :rtype: :class:`github.Reaction.Reaction`
    """
    assert isinstance(reaction_type, (str, unicode)), "reaction type should be a string"
    assert reaction_type in ["+1", "-1", "laugh", "confused", "heart", "hooray"], "Invalid reaction type (https://developer.github.com/v3/reactions/#reaction-types)"
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        self.url + "/reactions",
        input={"content": reaction_type},
        headers={'Accept': Consts.mediaTypeReactionsPreview},
    )
    return github.Reaction.Reaction(self._requester, headers, data, completed=True)
def put(self, item: T, context: PipelineContext = None) -> None:
    """Puts an object into the data sink. The object may be transformed into a new type for insertion if necessary.

    Args:
        item: The object to be inserted into the data sink.
        context: The context of the insertion (mutable).
    """
    LOGGER.info("Converting item \"{item}\" for sink \"{sink}\"".format(item=item, sink=self._sink))
    # _transform may replace the item with an equivalent object of the type
    # the sink actually stores.
    item = self._transform(data=item, context=context)
    # Fixed typo in the log message: "Puting" -> "Putting".
    LOGGER.info("Putting item \"{item}\" into sink \"{sink}\"".format(item=item, sink=self._sink))
    self._sink.put(self._store_type, item, context)
def save_model(self, request, obj, form, change):
    '''Our custom addition to the view adds an easy radio button choice for
    the new state. This is meant to be for tutors.

    We peel that choice from the form data and set the submission state
    accordingly. The radio buttons have no default, so the existing state
    is kept when the user makes no explicit choice. Everything else is
    managed as prepared by the framework.
    '''
    choice = request.POST['newstate'] if 'newstate' in request.POST else None
    if choice == 'finished':
        obj.state = Submission.GRADED
    elif choice == 'unfinished':
        obj.state = Submission.GRADING_IN_PROGRESS
    obj.save()
def logstr(self):
    """Render the collected log records into one formatted string."""
    formatter = LogFormatter(color=False)
    pieces = []
    for record in self.logs:
        if isinstance(record, six.string_types):
            # Plain strings are passed through as-is (just unicode-coerced).
            pieces.append(pretty_unicode(record))
        else:
            if record.exc_info:
                # Hide this module's frames from the traceback before
                # formatting the record.
                exc_type, exc_value, tb = record.exc_info
                record.exc_info = exc_type, exc_value, hide_me(tb, globals())
            pieces.append(pretty_unicode(formatter.format(record)))
            pieces.append(u'\n')
    return u''.join(pieces)
def replace_requirements(self, infilename, outfile_initial=None):
    """Recursively replace requirement statements in *infilename* with the
    content of the referenced requirement files.

    @param infilename: path of the input file to process.
    @param outfile_initial: optional writable file object to append to; when
        omitted, a new temporary file is created and returned seeked to 0.
    Returns the final temporary file opened for reading.
    """
    # Extract the requirements recorded for this file (those that were not
    # skipped) from the parallel global lists.
    _indexes = tuple(i for i, parent in enumerate(self.req_parents) if parent == infilename)
    req_paths = tuple(path for i, path in enumerate(self.req_paths) if i in _indexes)
    req_linenos = tuple(lineno for i, lineno in enumerate(self.req_linenos) if i in _indexes)
    outfile = outfile_initial if outfile_initial else tempfile.TemporaryFile('w+')
    # Copy the input to the output, replacing the requirement statements
    # with the requirements themselves.  `with` fixes the original's file
    # handle leak on exceptions.
    with open(infilename, 'r') as infile:
        for i, line in enumerate(infile):
            if i in req_linenos:
                req_path = req_paths[req_linenos.index(i)]
                # Skip unresolved requirement.
                if not req_path:
                    continue
                # Recurse into the referenced file, then splice its content
                # in at the cursor position.
                req_file = self.replace_requirements(req_path)
                self.insert_requirement(outfile, req_file, req_path)
                req_file.close()
            else:
                outfile.write(line)
    if not outfile_initial:
        outfile.seek(0)
    return outfile
def handle_session_cookie(self):
    """Handle JSESSIONID cookie logic."""
    # JSESSIONID support can be disabled in the server settings; in that
    # case the cookie is left alone entirely.
    if not self.server.settings['jsessionid']:
        return
    cookie = self.cookies.get('JSESSIONID')
    # Echo the client's value back, or seed a placeholder when absent.
    value = cookie.value if cookie else 'dummy'
    self.set_cookie('JSESSIONID', value)
def announce_urls(self, default=None):
    """Get a list of all enabled announce URLs.

    Returns `default` (an empty list when not given) if no trackers are
    found at all.
    """
    # Fixed the mutable default argument (was ``default=[]``); ``None``
    # avoids sharing one list object across calls while keeping the same
    # observable behavior.
    if default is None:
        default = []
    try:
        # Multicall over all trackers: fetch each tracker's URL and its
        # enabled flag in one round-trip.
        response = self._engine._rpc.t.multicall(self._fields["hash"], 0, "t.url=", "t.is_enabled=")
    except xmlrpc.ERRORS as exc:
        raise error.EngineError("While getting announce URLs for #%s: %s" % (self._fields["hash"], exc))
    if response:
        # Keep only the URLs whose enabled flag is truthy.
        return [url for url, enabled in response if enabled]
    return default
def query_ec_number():
    """Returns list of Enzyme Commission Numbers (EC numbers) by query parameters
    ---
    tags:
      - Query functions
    parameters:
      - name: ec_number
        in: query
        type: string
        required: false
        description: Enzyme Commission Number
        default: '1.1.1.1'
      - name: entry_name
        in: query
        type: string
        required: false
        description: UniProt entry name
        default: ADHX_HUMAN
      - name: limit
        in: query
        type: integer
        required: false
        description: limit of results numbers
        default: 10
    """
    # NOTE: the docstring above is a Swagger/flasgger spec consumed at
    # runtime — keep it in sync with the allowed arguments below.
    allowed_args = get_args(request_args=request.args,
                            allowed_str_args=['ec_number', 'entry_name'],
                            allowed_int_args=['limit'])
    return jsonify(query.ec_number(**allowed_args))
def copy(self):
    """Convert ``Dictator`` to a standard ``dict`` object.

    >>> dc = Dictator()
    >>> dc['l0'] = [1, 2]
    >>> dc['1'] = 'abc'
    >>> d = dc.copy()
    >>> type(d)
    dict
    >>> dc.clear()

    :return: Python's dict object
    :rtype: dict
    """
    logger.debug('call to_dict')
    # Materialize every key through self.get() so stored values are
    # converted the same way single-item access converts them.
    return dict((key, self.get(key)) for key in self.keys())
def _load(self, url, verbose):
    """Execute a request against the Salesking API to fetch the items.

    :param url: url to fetch
    :param verbose: when truthy, also echo the query to stdout
    :return: response
    :raises SaleskingException: with the corresponding http errors
    """
    msg = u"_load url: %s" % url
    self._last_query_str = url
    log.debug(msg)
    if verbose:
        # Fixed py2-only ``print msg`` statement; with a single argument,
        # ``print(msg)`` behaves identically on Python 2 and 3.
        print(msg)
    response = self.__api__.request(url)
    return response
def check(self, url_data):
    """Scan the URL's downloaded content for viruses with ClamAV and attach
    warnings (infections/scan errors) or an all-clear info to *url_data*.

    Note: the original docstring wrongly described a GeoIP country lookup;
    the code performs a virus scan.
    """
    data = url_data.get_content()
    infected, errors = scan(data, self.clamav_conf)
    if infected or errors:
        for msg in infected:
            url_data.add_warning(u"Virus scan infection: %s" % msg)
        for msg in errors:
            url_data.add_warning(u"Virus scan error: %s" % msg)
    else:
        url_data.add_info("No viruses in data found.")
def isom(self, coolingFactor=None, EdgeAttribute=None, initialAdaptation=None, maxEpoch=None, minAdaptation=None, minRadius=None, network=None, NodeAttribute=None, nodeList=None, radius=None, radiusConstantTime=None, singlePartition=None, sizeFactor=None, verbose=None):
    """Execute the Inverted Self-Organizing Map Layout on a network.

    All parameters are optional strings holding numeric (or boolean) values
    as accepted by the Cytoscape layout endpoint:

    :param coolingFactor: cooling factor
    :param EdgeAttribute: name of the edge column with numeric weights
    :param initialAdaptation: initial adaptation
    :param maxEpoch: number of iterations
    :param minAdaptation: minimum adaptation value
    :param minRadius: minimum radius
    :param network: network name, "SUID:<id>", CURRENT or blank for the
        current network
    :param NodeAttribute: name of the node column with numeric weights
    :param nodeList: node list specifier (all/selected/unselected or
        COLUMN:VALUE pairs)
    :param radius: radius
    :param radiusConstantTime: radius constant
    :param singlePartition: don't partition graph before layout
        (true/false, defaults to false)
    :param sizeFactor: size factor
    :param verbose: print debugging information
    """
    network = check_network(self, network, verbose=verbose)
    arg_names = ['coolingFactor', 'EdgeAttribute', 'initialAdaptation', 'maxEpoch',
                 'minAdaptation', 'minRadius', 'network', 'NodeAttribute', 'nodeList',
                 'radius', 'radiusConstantTime', 'singlePartition', 'sizeFactor']
    arg_values = [coolingFactor, EdgeAttribute, initialAdaptation, maxEpoch,
                  minAdaptation, minRadius, network, NodeAttribute, nodeList,
                  radius, radiusConstantTime, singlePartition, sizeFactor]
    PARAMS = set_param(arg_names, arg_values)
    return api(url=self.__url + "/isom", PARAMS=PARAMS, method="POST", verbose=verbose)
def uppass(tree, feature):
    """UPPASS traverses the tree starting from the tips and going up till the root,
    and assigns to each parent node a state based on the states of its child nodes.

    Pseudocode of the classic (Fitch-style) bottom-up pass::

        if N is a tip:
            S(N) <- state of N
        else:
            L, R <- left and right children of N
            UPPASS(L)
            UPPASS(R)
            if S(L) intersects with S(R):
                S(N) <- intersection(S(L), S(R))
            else:
                S(N) <- union(S(L), S(R))

    :param tree: ete3.Tree, the tree of interest
    :param feature: str, character for which the parsimonious states are reconstructed
    :return: void, adds get_personalized_feature_name(feature, BU_PARS_STATES) feature to the tree nodes
    """
    ps_feature = get_personalized_feature_name(feature, BU_PARS_STATES)
    # NOTE(review): the loop below also *reads* ps_feature from each internal
    # node before (re)assigning it, so the feature is presumably initialised
    # on every node elsewhere before this pass — confirm against the caller.
    for node in tree.traverse('postorder'):
        # postorder guarantees children are processed before their parent.
        if not node.is_leaf():
            # Combined child states; get_most_common_states is an opaque
            # helper here, not the plain union of the pseudocode above.
            children_states = get_most_common_states(getattr(child, ps_feature) for child in node.children)
            node_states = getattr(node, ps_feature)
            state_intersection = node_states & children_states
            # Narrow to the intersection when non-empty; otherwise keep the
            # node's existing state set.
            node.add_feature(ps_feature, state_intersection if state_intersection else node_states)
def from_array(name, array, dim_names=None):
    """Construct a LIGO Light Weight XML Array document subtree from a
    numpy array object.

    Example:

    >>> import numpy, sys
    >>> a = numpy.arange(12, dtype="double")
    >>> a.shape = (4, 3)
    >>> from_array(u"test", a).write(sys.stdout)  # doctest: +NORMALIZE_WHITESPACE
    <Array Type="real_8" Name="test:array">
        <Dim>3</Dim>
        <Dim>4</Dim>
        <Stream Delimiter=" " Type="Local">
            0 3 6 9
            1 4 7 10
            2 5 8 11
        </Stream>
    </Array>
    """
    # Type must be set for .__init__(); easier to set Name afterwards
    # to take advantage of encoding handled by attribute proxy
    doc = Array(Attributes({u"Type": ligolwtypes.FromNumPyType[str(array.dtype)]}))
    doc.Name = name
    # Dim children are emitted innermost-first, hence reversed() over the
    # numpy shape (see the example: shape (4, 3) -> Dims 3, 4).
    for n, dim in enumerate(reversed(array.shape)):
        child = ligolw.Dim()
        if dim_names is not None:
            # NOTE(review): dim_names is indexed in the same reversed order,
            # so callers presumably supply names innermost-first — confirm.
            child.Name = dim_names[n]
        child.pcdata = unicode(dim)  # py2 ``unicode``; module targets Python 2
        doc.appendChild(child)
    child = ArrayStream(Attributes({u"Type": ArrayStream.Type.default, u"Delimiter": ArrayStream.Delimiter.default}))
    doc.appendChild(child)
    doc.array = array
    return doc
def query(self, query, *args, **kwargs):
    """Run a statement on the database directly.

    Allows for the execution of arbitrary read/write queries. A query can
    either be a plain text string, or a `SQLAlchemy expression
    <http://docs.sqlalchemy.org/en/latest/core/tutorial.html#selecting>`_.
    If a plain string is passed in, it will be converted to an expression
    automatically.

    Further positional and keyword arguments will be used for parameter
    binding. To include a positional argument in your query, use question
    marks in the query (i.e. ``SELECT * FROM tbl WHERE a = ?``). For
    keyword arguments, use a bind parameter (i.e. ``SELECT * FROM tbl
    WHERE a = :foo``).

        statement = 'SELECT user, COUNT(*) c FROM photos GROUP BY user'
        for row in db.query(statement):
            print(row['user'], row['c'])

    The returned iterator will yield each result sequentially.
    """
    # Plain strings are promoted to SQLAlchemy text() expressions.
    statement = text(query) if isinstance(query, six.string_types) else query
    step = kwargs.pop('_step', QUERY_STEP)
    result_proxy = self.executable.execute(statement, *args, **kwargs)
    return ResultIter(result_proxy, row_type=self.row_type, step=step)
def useradd(pwfile, user, password, opts='', runas=None):
    '''Add a user to htpasswd file using the htpasswd command. If the htpasswd
    file does not exist, it will be created.

    pwfile
        Path to htpasswd file

    user
        User name

    password
        User password

    opts
        Valid options that can be passed are:

            - `n`  Don't update file; display results on stdout.
            - `m`  Force MD5 encryption of the password (default).
            - `d`  Force CRYPT encryption of the password.
            - `p`  Do not encrypt the password (plaintext).
            - `s`  Force SHA encryption of the password.

    runas
        The system user to run htpasswd command with

    CLI Examples:

    .. code-block:: bash

        salt '*' webutil.useradd /etc/httpd/htpasswd larry badpassword
        salt '*' webutil.useradd /etc/httpd/htpasswd larry badpass opts=ns
    '''
    # htpasswd needs the -c flag to create a file that does not exist yet.
    flags = opts if os.path.exists(pwfile) else opts + 'c'
    command = ['htpasswd', '-b{0}'.format(flags), pwfile, user, password]
    return __salt__['cmd.run_all'](command, runas=runas, python_shell=False)
def generate_from_text(self, text):
    """Generate wordcloud from text.

    The input "text" is expected to be a natural text. If you pass a sorted
    list of words, words will appear in your output twice. To remove this
    duplication, set ``collocations=False``.

    Calls process_text and generate_from_frequencies.

    .. versionchanged:: 1.2.2
        Argument of generate_from_frequencies() is not return of
        process_text() any more.

    Returns
    -------
    self
    """
    frequencies = self.process_text(text)
    self.generate_from_frequencies(frequencies)
    return self
def get_view_nodes_from_indexes(self, *indexes):
    """Returns the View nodes from given indexes.

    :param \\*indexes: Indexes.
    :type \\*indexes: list
    :return: View nodes mapped to their attributes.
    :rtype: dict
    """
    nodes = {}
    model = self.model()
    if not model:
        return nodes
    # The model must expose both accessors before any index is resolved.
    for method in ("get_node", "get_attribute"):
        if not hasattr(model, method):
            raise NotImplementedError(
                "{0} | '{1}' Model doesn't implement a '{2}' method!".format(__name__, model, method))
    for index in indexes:
        node = model.get_node(index)
        nodes.setdefault(node, [])
        attribute = model.get_attribute(node, index.column())
        if attribute:
            nodes[node].append(attribute)
    return nodes
def request_announcement_view(request):
    """The request announcement page.

    GET renders an empty request form; POST validates it, stores the
    announcement request, records the requested teachers, and (optionally)
    sends approval emails.
    """
    if request.method == "POST":
        form = AnnouncementRequestForm(request.POST)
        logger.debug(form)
        logger.debug(form.data)
        if form.is_valid():
            teacher_objs = form.cleaned_data["teachers_requested"]
            logger.debug("teacher objs:")
            logger.debug(teacher_objs)
            if len(teacher_objs) > 2:
                messages.error(request, "Please select a maximum of 2 teachers to approve this post.")
            else:
                obj = form.save(commit=True)
                obj.user = request.user
                # SAFE HTML
                obj.content = safe_html(obj.content)
                obj.save()
                ann = AnnouncementRequest.objects.get(id=obj.id)
                logger.debug(teacher_objs)
                approve_self = False
                for teacher in teacher_objs:
                    ann.teachers_requested.add(teacher)
                    if teacher == request.user:
                        approve_self = True
                ann.save()
                if approve_self:
                    # BUG FIX: the original did ``ann.teachers_approved.add(teacher)``,
                    # re-using the leaked loop variable — i.e. the *last iterated*
                    # teacher, which is not necessarily the requesting user.
                    ann.teachers_approved.add(request.user)
                    ann.save()
                    if settings.SEND_ANNOUNCEMENT_APPROVAL:
                        admin_request_announcement_email(request, form, ann)
                        ann.admin_email_sent = True
                        ann.save()
                    return redirect("request_announcement_success_self")
                else:
                    if settings.SEND_ANNOUNCEMENT_APPROVAL:
                        request_announcement_email(request, form, obj)
                    return redirect("request_announcement_success")
            return redirect("index")
        else:
            messages.error(request, "Error adding announcement request")
    else:
        form = AnnouncementRequestForm()
    return render(request, "announcements/request.html", {"form": form, "action": "add"})
def canonical_chimera_labeling(G, t=None):
    """Returns a mapping from the labels of G to chimera-indexed labeling.

    Parameters
    ----------
    G : NetworkX graph
        A Chimera-structured graph.
    t : int (optional, default 4)
        Size of the shore within each Chimera tile.

    Returns
    -------
    chimera_indices : dict
        A mapping from the current labels to a 4-tuple of Chimera indices
        (row, column, shore u, shore index).
    """
    adj = G.adj
    if t is None:
        # Infer the shore size from the edge count when not given.  The
        # object may be a graph (.edges) or a BQM-like object (.quadratic).
        if hasattr(G, 'edges'):
            num_edges = len(G.edges)
        else:
            num_edges = len(G.quadratic)
        t = _chimera_shore_size(adj, num_edges)
    chimera_indices = {}
    row = col = 0
    # Start from a minimum-degree vertex — presumably a corner-tile vertex;
    # rooted_tile then splits its tile into the two shores.
    root = min(adj, key=lambda v: len(adj[v]))
    horiz, verti = rooted_tile(adj, root, t)
    # Label one tile per iteration until every vertex is indexed.
    while len(chimera_indices) < len(adj):
        new_indices = {}
        if row == 0:
            # if we're in the 0th row, we can assign the horizontal randomly
            for si, v in enumerate(horiz):
                new_indices[v] = (row, col, 0, si)
        else:
            # we need to match the row above
            for v in horiz:
                north = [u for u in adj[v] if u in chimera_indices]
                assert len(north) == 1
                i, j, u, si = chimera_indices[north[0]]
                assert i == row - 1 and j == col and u == 0
                new_indices[v] = (row, col, 0, si)
        if col == 0:
            # if we're in the 0th col, we can assign the vertical randomly
            for si, v in enumerate(verti):
                new_indices[v] = (row, col, 1, si)
        else:
            # we need to match the column to the east
            for v in verti:
                east = [u for u in adj[v] if u in chimera_indices]
                assert len(east) == 1
                i, j, u, si = chimera_indices[east[0]]
                assert i == row and j == col - 1 and u == 1
                new_indices[v] = (row, col, 1, si)
        chimera_indices.update(new_indices)
        # get the next root
        root_neighbours = [v for v in adj[root] if v not in chimera_indices]
        if len(root_neighbours) == 1:
            # we can increment the row
            root = root_neighbours[0]
            horiz, verti = rooted_tile(adj, root, t)
            row += 1
        else:
            # need to go back to row 0, and increment the column
            assert not root_neighbours  # should be empty
            # we want (0, col, 1, 0); we could cache this, but for now let's
            # just go look for it the slow way
            vert_root = [v for v in chimera_indices if chimera_indices[v] == (0, col, 1, 0)][0]
            vert_root_neighbours = [v for v in adj[vert_root] if v not in chimera_indices]
            if vert_root_neighbours:
                # Note the swapped shores: stepping east flips orientation.
                verti, horiz = rooted_tile(adj, vert_root_neighbours[0], t)
                root = next(iter(horiz))
            row = 0
            col += 1
    return chimera_indices
def get_download_url(self, proapi=False):
    """Get this file's download URL, fetching it lazily and caching the
    result for subsequent calls.

    :param bool proapi: whether to use pro API
    """
    cached = self._download_url
    if cached is not None:
        return cached
    self._download_url = self.api._req_files_download_url(self.pickcode, proapi)
    return self._download_url
def unify_partitions(self):
    """For all of the segments for a partition, create the parent partition,
    combine the children into the parent, and delete the children.
    """
    partitions = self.collect_segment_partitions()
    # For each group, copy the segment partitions to the parent partitions,
    # then delete the segment partitions.
    with self.progress.start('coalesce', 0, message='Coalescing partition segments') as ps:
        for name, segments in iteritems(partitions):
            # Fixed typo in the per-partition progress message:
            # "Colescing" -> "Coalescing".
            ps.add(item_type='partitions', item_count=len(segments),
                   message='Coalescing partition {}'.format(name))
            self.unify_partition(name, segments, ps)
def set_input(self, filename, pass_to_command_line=True):
    """Add an input to the node by adding a --input option.

    @param filename: option argument to pass as input.
    @bool pass_to_command_line: add input as a variable option.
    """
    if pass_to_command_line:
        self.add_var_opt('input', filename)
    self.__input = filename
    self.add_input_file(filename)
def extract_message_value(self, name):
    '''Search the stored message for ``name:`` and return the value that
    follows it on the same line (whitespace-stripped), or None when the
    name is not present.
    '''
    name += ":"
    assert self._message
    start = self._message.find(name)
    if start < 0:
        return None
    # BUG FIX: the original advanced by len(name) + 1, silently dropping the
    # first character of the value whenever the colon was not followed by a
    # space (e.g. "key:value" yielded "alue").  Skip exactly the label and
    # let strip() remove any padding.
    start += len(name)
    end = self._message.find("\n", start)
    if end < 0:
        # BUG FIX: without this guard, a match on the last (newline-less)
        # line used end == -1 and the slice dropped the final character.
        end = len(self._message)
    return self._message[start:end].strip()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.