signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def isUpdated(self):
    """Report whether the file was modified after the recorded timestamp.

    Compares the file's current modification time against the stored
    stamp taken when the file last errored.
    """
    return self.getmtime() > self.__stamp
def get_uri_name(url):
    """Extract the file name from the tail of a URL's path.

    Only useful for PyBEL's testing though, since it looks specifically
    at files from the ownCloud resources distributed by Fraunhofer.
    """
    path_segments = urlparse(url).path.split('/')
    log.info('url parts: %s', path_segments)
    return path_segments[-1]
def output(self):
    """Return the default workflow outputs in an ordered dictionary.

    At the moment this is just the collection of outputs of the branch
    tasks, stored with the key ``"collection"``.
    """
    task = self.task
    # Pick the collection class: an explicit override wins, then the
    # sibling-file variant, then the plain default.
    if task.target_collection_cls is not None:
        collection_cls = task.target_collection_cls
    elif task.outputs_siblings:
        collection_cls = SiblingFileCollection
    else:
        collection_cls = TargetCollection
    targets = luigi.task.getpaths(task.get_branch_tasks())
    collection = collection_cls(targets, threshold=self.threshold(len(targets)))
    return OrderedDict([("collection", collection)])
def GameTypeEnum(ctx):
    """Game Type Enumeration."""
    game_types = dict(
        RM=0,
        Regicide=1,
        DM=2,
        Scenario=3,
        Campaign=4,
        KingOfTheHill=5,
        WonderRace=6,
        DefendTheWonder=7,
        TurboRandom=8,
    )
    return Enum(ctx, **game_types)
def get_mysql_pool(host=None, user=None, password=None, charset='utf8',
                   db=None, size=None, isSync=True, ioloop=None):
    """Return a connection pool created via the factory method.

    The factory chosen depends on ``isSync`` (synchronous vs. async pool).
    """
    pool_factory = mysql_pool_factory(isSync)
    return pool_factory(
        host=host,
        user=user,
        password=password,
        charset=charset,
        db=db,
        size=size,
    )
def getCmdOpts(self, text):
    '''Use the _cmd_syntax def to split/parse/normalize the cmd line.

    Args:
        text (str): Command to process.

    Notes:
        This is implemented independent of argparse (et al) due to the
        need for syntax aware argument splitting. Also, allows different
        split per command type.

    Returns:
        dict: An opts dictionary.
    '''
    off = 0

    # skip leading whitespace and consume the command name itself
    _, off = s_syntax.nom(text, off, s_syntax.whites)
    name, off = s_syntax.meh(text, off, s_syntax.whites)
    _, off = s_syntax.nom(text, off, s_syntax.whites)

    opts = {}

    # positional arguments (in declaration order) and switches (by name)
    args = collections.deque([synt for synt in self._cmd_syntax if not synt[0].startswith('-')])
    switches = {synt[0]: synt for synt in self._cmd_syntax if synt[0].startswith('-')}

    # populate defaults and list accumulators
    for synt in self._cmd_syntax:
        snam = synt[0].strip('-')

        defval = synt[1].get('defval')
        if defval is not None:
            opts[snam] = defval

        if synt[1].get('type') in ('list', 'kwlist'):
            opts[snam] = []

    def atswitch(t, o):
        # check if we are at a recognized switch.  if not
        # assume the data is part of regular arguments.
        if not text.startswith('-', o):
            return None, o

        # renamed from `name` to avoid shadowing the command name above
        swname, x = s_syntax.meh(t, o, s_syntax.whites)
        swit = switches.get(swname)
        if swit is None:
            return None, o

        return swit, x

    while off < len(text):

        _, off = s_syntax.nom(text, off, s_syntax.whites)

        swit, off = atswitch(text, off)
        if swit is not None:

            styp = swit[1].get('type', 'flag')
            snam = swit[0].strip('-')

            if styp == 'valu':
                valu, off = s_syntax.parse_cmd_string(text, off)
                opts[snam] = valu

            elif styp == 'list':
                valu, off = s_syntax.parse_cmd_string(text, off)
                if not isinstance(valu, list):
                    valu = valu.split(',')
                opts[snam].extend(valu)

            elif styp == 'enum':
                vals = swit[1].get('enum:vals')
                valu, off = s_syntax.parse_cmd_string(text, off)
                if valu not in vals:
                    raise s_exc.BadSyntax(mesg='%s (%s)' % (swit[0], '|'.join(vals)), text=text)
                opts[snam] = valu

            else:
                opts[snam] = True

            continue

        if not args:
            raise s_exc.BadSyntax(mesg='trailing text: [%s]' % (text[off:],), text=text)

        synt = args.popleft()
        styp = synt[1].get('type', 'valu')

        # a glob type eats the remainder of the string
        if styp == 'glob':
            opts[synt[0]] = text[off:]
            break

        # eat the remainder of the string as separate vals
        if styp == 'list':
            valu = []
            while off < len(text):
                item, off = s_syntax.parse_cmd_string(text, off)
                valu.append(item)
            opts[synt[0]] = valu
            break

        if styp == 'kwlist':
            kwlist, off = s_syntax.parse_cmd_kwlist(text, off)
            # BUG FIX: store under this positional argument's own name.
            # The original used `snam`, a stale leftover from the defaults
            # loop (or a previously parsed switch), which mis-keyed the
            # kwlist result unless the kwlist synt happened to be last.
            opts[synt[0]] = kwlist
            break

        valu, off = s_syntax.parse_cmd_string(text, off)
        opts[synt[0]] = valu

    return opts
def send_reset_password_link(request):
    '''Send email with reset password link.'''
    if not registration_settings.RESET_PASSWORD_VERIFICATION_ENABLED:
        raise Http404()

    serializer = SendResetPasswordLinkSerializer(data=request.data)
    serializer.is_valid(raise_exception=True)
    login = serializer.validated_data['login']

    # Try every configured login field until a user matches.
    user = None
    for login_field in get_login_fields():
        user = get_user_by_lookup_dict(
            {login_field: login}, default=None, require_verified=False)
        if user:
            break
    if not user:
        raise UserNotFound()

    signer = ResetPasswordSigner({'user_id': user.pk}, request=request)
    template_config = registration_settings.RESET_PASSWORD_VERIFICATION_EMAIL_TEMPLATES
    send_verification_notification(user, signer, template_config)
    return get_ok_response('Reset link sent')
def run_numerical_analysis(table, schema_list, args):
    """Find min/max values for the numerical columns and writes a json file.

    Args:
        table: Reference to FederatedTable (if bigquery_table is false) or a
            regular Table (otherwise)
        schema_list: Bigquery schema json object
        args: the command line args
    """
    import google.datalab.bigquery as bq

    # Collect the names of the integer/float columns from the schema.
    numerical_columns = [
        col_schema['name']
        for col_schema in schema_list
        if col_schema['type'].lower() in ('integer', 'float')
    ]

    # Nothing to do when there are no numerical columns.
    if not numerical_columns:
        return

    sys.stdout.write('Running numerical analysis...')
    max_min = [
        ('max({name}) as max_{name}, '
         'min({name}) as min_{name}, '
         'avg({name}) as avg_{name} ').format(name=name)
        for name in numerical_columns
    ]
    if args.bigquery_table:
        sql = 'SELECT %s from `%s`' % (
            ', '.join(max_min), parse_table_name(args.bigquery_table))
        numerical_results = bq.Query(sql).execute().result().to_dataframe()
    else:
        sql = 'SELECT %s from csv_table' % ', '.join(max_min)
        query = bq.Query(sql, data_sources={'csv_table': table})
        numerical_results = query.execute().result().to_dataframe()

    # Convert the numerical results to a json file.
    results_dict = {
        name: {
            'max': numerical_results.iloc[0]['max_%s' % name],
            'min': numerical_results.iloc[0]['min_%s' % name],
            'mean': numerical_results.iloc[0]['avg_%s' % name],
        }
        for name in numerical_columns
    }
    file_io.write_string_to_file(
        os.path.join(args.output_dir, NUMERICAL_ANALYSIS_FILE),
        json.dumps(results_dict, indent=2, separators=(',', ': ')))
    sys.stdout.write('done.\n')
def updatePotentialRadius(sp, newPotentialRadius):
    """Change the potential radius for all columns of a spatial pooler.

    Re-maps every column's potential pool under the new radius and then
    refreshes the pooler's inhibition radius so it stays consistent.

    :param sp: the spatial pooler instance to mutate in place
    :param newPotentialRadius: the new potential radius value
    :return: None
    """
    # FIX: use range() instead of the Python-2-only xrange(); also drop
    # the unused `oldPotentialRadius` local from the original.
    sp._potentialRadius = newPotentialRadius
    numColumns = np.prod(sp.getColumnDimensions())
    for columnIndex in range(numColumns):
        potential = sp._mapPotential(columnIndex)
        sp._potentialPools.replace(columnIndex, potential.nonzero()[0])
    sp._updateInhibitionRadius()
def get_field_infos(code, free_format):
    """Gets the list of pic fields information from the given source text.

    :param code: code to parse
    :param free_format: whether the source uses free-format layout
    :returns: the list of pic fields info found in the specified text.
    """
    offset = 0
    previous_offset = 0
    field_infos = []
    lines = _clean_code(code)
    for row in process_cobol(lines, free_format):
        fi = PicFieldInfo()
        fi.name = row["name"]
        fi.level = row["level"]
        fi.pic = row["pic"]
        fi.occurs = row["occurs"]
        fi.redefines = row["redefines"]
        fi.indexed_by = row["indexed_by"]

        # find item that was redefined and use its offset
        if fi.redefines:
            for redefined in field_infos:
                if redefined.name == fi.redefines:
                    offset = redefined.offset

        # level 1 should have their offset set to 1
        if fi.level == 1:
            offset = 1
        # level 78 have no offset
        if fi.level == 78:
            offset = 0
        # level 77 have offset always to 1
        if fi.level == 77:
            offset = 1

        # set item offset
        fi.offset = offset

        # special case: level 88 share the offset of their parent
        if fi.level == 88:
            fi.offset = previous_offset
        else:
            previous_offset = offset

        field_infos.append(fi)

        # compute offset of next PIC field.
        if row['pic']:
            offset += row['pic_info']['length']
    return field_infos
def get_datafeed_stats(self, datafeed_id=None, params=None):
    """`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-get-datafeed-stats.html>`_

    :arg datafeed_id: The ID of the datafeeds stats to fetch
    :arg allow_no_datafeeds: Whether to ignore if a wildcard expression
        matches no datafeeds. (This includes `_all` string or when no
        datafeeds have been specified)
    """
    path = _make_path("_ml", "datafeeds", datafeed_id, "_stats")
    return self.transport.perform_request("GET", path, params=params)
def _validate_names(self, name=None, names=None, deep=False):
    """Handles the quirks of having a singular 'name' parameter for general
    Index and plural 'names' parameter for MultiIndex.
    """
    from copy import deepcopy

    if names is not None and name is not None:
        raise TypeError("Can only provide one of `names` and `name`")

    if names is None and name is None:
        # Neither given: hand back the existing names (copied if `deep`).
        return deepcopy(self.names) if deep else self.names

    if names is not None:
        if not is_list_like(names):
            raise TypeError("Must pass list-like as `names`.")
        return names

    # Singular `name`: wrap a scalar into a one-element list.
    if not is_list_like(name):
        return [name]
    return name
def addAction(self, action):
    """Adds the inputed action to this toolbar.

    :param action | <QAction>
    """
    super(XDockToolbar, self).addAction(action)

    # Create a label for the action and slot it in just before the
    # final layout item.
    label = XDockActionLabel(action, self.minimumPixmapSize(), self)
    label.setPosition(self.position())

    layout = self.layout()
    layout.insertWidget(layout.count() - 1, label)
def _correct_qualimap_genome_results(samples):
    """fixing java.lang.Double.parseDouble error on entries like "6,082.49" """
    for sample in samples:
        if not verify_file(sample.qualimap_genome_results_fpath):
            continue

        with open(sample.qualimap_genome_results_fpath, 'r') as handle:
            content = handle.readlines()

        # First pass: detect whether any metric line (after the
        # ">> Reference" marker) contains a comma-formatted number.
        needs_fix = False
        in_metrics = False
        for line in content:
            if ">> Reference" in line:
                in_metrics = True
            if in_metrics and ',' in line:
                needs_fix = True
                break

        # Second pass: rewrite the file with commas stripped from the
        # metrics section only.
        if needs_fix:
            with open(sample.qualimap_genome_results_fpath, 'w') as handle:
                in_metrics = False
                for line in content:
                    if ">> Reference" in line:
                        in_metrics = True
                    if in_metrics and ',' in line:
                        line = line.replace(',', '')
                    handle.write(line)
def _encode_item ( self , item ) :
'''Encode an item object
@ requires : The object be serializable''' | if self . encoding . __name__ == 'pickle' :
return self . encoding . dumps ( item , protocol = - 1 )
else :
return self . encoding . dumps ( item ) |
def batchsd(trace, batches=5):
    """Calculates the simulation standard error, accounting for non-independent
    samples. The trace is divided into batches, and the standard deviation of
    the batch means is calculated.

    :param trace: array-like of samples; multidimensional traces are
        handled per flattened variable.
    :param batches: number of batches to split the trace into.
    :returns: the estimated standard error (scalar for 1-D input,
        array shaped like the trailing dimensions otherwise).
    """
    if len(np.shape(trace)) > 1:
        dims = np.shape(trace)
        # ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
        ttrace = np.transpose([t.ravel() for t in trace])
        return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])

    if batches == 1:
        return np.std(trace) / np.sqrt(len(trace))

    try:
        batched_traces = np.resize(trace, (batches, int(len(trace) / batches)))
    except ValueError:
        # If batches do not divide evenly, trim excess samples.
        # BUG FIX: use integer division for the shape; the original used
        # true division, which yields a float and makes the shape tuple
        # invalid under Python 3.
        resid = len(trace) % batches
        trimmed = trace[:-resid]
        batched_traces = np.resize(trimmed, (batches, len(trimmed) // batches))

    means = np.mean(batched_traces, 1)
    return np.std(means) / np.sqrt(batches)
def on_directory_button_tool_clicked(self):
    """Autoconnect slot activated when directory button is clicked."""
    # noinspection PyCallByClass,PyTypeChecker
    # set up parameter from dialog
    input_path = self.layer.currentLayer().source()
    input_directory, self.output_filename = os.path.split(input_path)
    file_extension = os.path.splitext(self.output_filename)[1]
    self.output_filename = os.path.splitext(self.output_filename)[0]

    # show Qt file directory dialog
    suggested_path = '%s_multi_buffer%s' % (
        os.path.join(input_directory, self.output_filename), file_extension)
    output_path, __ = QtWidgets.QFileDialog.getSaveFileName(
        self,
        self.tr('Output file'),
        suggested_path,
        'GeoJSON (*.geojson);;Shapefile (*.shp)')

    # set selected path to the dialog
    self.output_form.setText(output_path)
def load_layer_without_provider(layer_uri, layer_name='tmp'):
    """Helper to load a layer when we don't know the driver.

    Don't use it, it's an empiric function trying each provider one per one.
    OGR/GDAL is printing a lot of errors saying that the layer is not valid.

    :param layer_uri: Layer URI that will be used by QGIS to load the layer.
    :type layer_uri: basestring

    :param layer_name: Layer name to use. Default to 'tmp'.
    :type layer_name: basestring

    :return: The layer or None if it's failed.
    :rtype: QgsMapLayer
    """
    # Let's try the most common vector driver first.
    layer = QgsVectorLayer(layer_uri, layer_name, VECTOR_DRIVERS[0])
    if layer.isValid():
        return layer

    # Let's try the most common raster driver.
    layer = QgsRasterLayer(layer_uri, layer_name, RASTER_DRIVERS[0])
    if layer.isValid():
        return layer

    # Then try all other vector drivers.
    for driver in VECTOR_DRIVERS[1:]:
        if driver == 'delimitedtext':
            # Explicitly use URI with delimiter or tests fail in Windows. TS.
            layer = QgsVectorLayer(
                'file:///%s?delimiter=,' % layer_uri, layer_name, driver)
            if layer.isValid():
                return layer
        layer = QgsVectorLayer(layer_uri, layer_name, driver)
        if layer.isValid():
            return layer

    # Finally the remaining raster drivers.
    for driver in RASTER_DRIVERS[1:]:
        layer = QgsRasterLayer(layer_uri, layer_name, driver)
        if layer.isValid():
            return layer

    return None
def parse_request(self, request, parameters=None, fake_method=None):
    '''Parse a WebOb request into (method, url, headers, POST params).'''
    method = request.method
    url = request.url
    headers = request.headers
    post_params = request.POST.mixed()
    return (method, url, headers, post_params)
def deinit(bus=DEFAULT_SPI_BUS, chip_select=DEFAULT_SPI_CHIP_SELECT):
    """Stops interrupts on all boards. Only required when using
    :func:`digital_read` and :func:`digital_write`.

    :param bus: SPI bus /dev/spidev<bus>.<chipselect> (default: {bus})
    :type bus: int
    :param chip_select: SPI chip select /dev/spidev<bus>.<chipselect>
        (default: {chip})
    :type chip_select: int
    """
    global _pifacedigitals
    for board in _pifacedigitals:
        try:
            board.deinit_board()
        except AttributeError:
            # entry has no deinit_board method; skip it
            pass
def split_marker(marker, fg_id=1, bg_id=2):
    """Splits an integer marker image into two binary images containing the
    foreground and background markers respectively.

    All encountered 1's are hereby treated as foreground, all 2's as
    background, all 0's as neutral marker and all others are ignored.
    This behaviour can be changed by supplying the fg_id and/or bg_id
    parameters.

    Parameters
    ----------
    marker : ndarray
        The marker image.
    fg_id : integer
        The value that should be treated as foreground.
    bg_id : integer
        The value that should be treated as background.

    Returns
    -------
    fgmarkers, bgmarkers : ndarray
        The fore- and background markers as boolean images.
    """
    # FIX: the original used scipy.asarray/scipy.zeros/scipy.bool_; those
    # NumPy re-exports were deprecated and removed from SciPy, so use
    # NumPy directly (imported locally to keep this change self-contained).
    import numpy

    img_marker = numpy.asarray(marker)
    # Boolean equality masks are equivalent to the original
    # zeros-then-set-True construction.
    img_fgmarker = img_marker == fg_id
    img_bgmarker = img_marker == bg_id
    return img_fgmarker, img_bgmarker
def Lc(self, value):
    """Set the column rotation matrix and invalidate cached values."""
    # the rotation must be a square P x P matrix
    assert value.shape == (self.P, self.P), 'dimension mismatch'
    self._Lc = value
    self.clear_cache()
def has_module_perms(self, user_obj, app_label):
    """Check if user has any permission of the specified app.

    Parameters
    ----------
    user_obj : django user model instance
        A django user model instance which be checked
    app_label : string
        Django application name

    Returns
    -------
    boolean
        Whether the specified user has any permissions of the specified app
    """
    cache_name = "_has_module_perms_%s_%s_cache" % (app_label, user_obj.pk)
    # Return the memoized result when we computed it previously.
    if hasattr(self, cache_name):
        return getattr(self, cache_name)

    result = False
    if self.app_label == app_label:
        for permission in self.get_supported_permissions():
            if user_obj.has_perm(permission):
                result = True
                break
    setattr(self, cache_name, result)
    return result
def parse_rules(data, chain):
    """Parse the rules for the specified chain."""
    rules = []
    for line in data.splitlines(True):
        match = re_rule.match(line)
        # Only keep rules belonging to the requested chain.
        if match is None or match.group(3) != chain:
            continue
        rule = parse_rule(match.group(4))
        rule.packets = int(match.group(1))
        rule.bytes = int(match.group(2))
        rules.append(rule)
    return rules
def _decode_input_tensor_to_features_dict(feature_map, hparams):
    """Convert the interactive input format (see above) to a dictionary.

    Args:
        feature_map: dict with inputs.
        hparams: model hyperparameters

    Returns:
        a features dictionary, as expected by the decoder.
    """
    inputs = tf.convert_to_tensor(feature_map["inputs"])
    input_is_image = False

    x = inputs
    p_hparams = hparams.problem_hparams
    # Add a third empty dimension.
    # FIX: `axis` must be a scalar int (the list form of the argument was
    # deprecated and removed from tf.expand_dims), and tf.to_int32 was
    # removed in TF2 -- tf.cast works in both TF1 and TF2.
    x = tf.expand_dims(x, axis=2)
    x = tf.cast(x, tf.int32)
    input_space_id = tf.constant(p_hparams.input_space_id)
    target_space_id = tf.constant(p_hparams.target_space_id)

    features = {}
    features["input_space_id"] = input_space_id
    features["target_space_id"] = target_space_id
    features["decode_length"] = (
        IMAGE_DECODE_LENGTH if input_is_image else tf.shape(x)[1] + 50)
    features["inputs"] = x
    return features
def check_expression(testing_framework, expression_dict):
    """Assert the 'Matches'/'Non-Matches' examples of each expression.

    >>> class mock_framework:
    ...     def assertIn(self, item, list, msg="Failed asserting item is in list"):
    ...         if item not in list: raise Exception(msg)
    ...     def assertTrue(self, value, msg="Failed asserting true"):
    ...         if not value: raise Exception(msg)
    ...     def assertFalse(self, value, msg): self.assertTrue(not value, msg)
    >>> check_expression(mock_framework(),
    ...     {'class': {'group': {'Matches': "0|1", 'Non-Matches': "2|0 2", 'Expression': "[0-1]"}}})
    """
    expression_sub = get_expression_sub()
    for expression_type_name, expression_type in expression_dict.items():
        for name, expression_object in expression_type.items():
            expression = expression_object['Expression']
            if 'Matches' in expression_object.keys():
                for test in expression_object['Matches'].split('|'):
                    # Substitute and check that the entire string matched.
                    fully_matched = expression_sub(expression, '', test.strip()) == ''
                    testing_framework.assertTrue(
                        fully_matched,
                        match_error_msg.format(expression_type_name, name, test))
            if 'Non-Matches' in expression_object.keys():
                for test in expression_object['Non-Matches'].split('|'):
                    fully_matched = expression_sub(expression, '', test.strip()) == ''
                    testing_framework.assertFalse(
                        fully_matched,
                        non_match_error_msg.format(expression_type_name, name, test))
def Channels(module):
    '''Returns the channels contained in the given K2 module.'''
    # first channel of each module; each module spans 4 consecutive channels
    first_channel = {
        2: 1, 3: 5, 4: 9, 6: 13, 7: 17, 8: 21, 9: 25, 10: 29,
        11: 33, 12: 37, 13: 41, 14: 45, 15: 49, 16: 53, 17: 57,
        18: 61, 19: 65, 20: 69, 22: 73, 23: 77, 24: 81,
    }
    start = first_channel.get(module)
    if start is None:
        return None
    return [start, start + 1, start + 2, start + 3]
def _send(self, messagetype, packet):
    """Send the GNTP Packet"""
    packet.validate()
    payload = packet.encode()

    logger.debug('To : %s:%s <%s>\n%s',
                 self.hostname, self.port, packet.__class__, payload)

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(self.socketTimeout)
    try:
        sock.connect((self.hostname, self.port))
        sock.send(payload)
        recv_data = sock.recv(1024)
        # keep reading until the GNTP terminator arrives
        while not recv_data.endswith(gntp.shim.b("\r\n\r\n")):
            recv_data += sock.recv(1024)
    except socket.error:
        # Python2.5 and Python3 compatibile exception
        exc = sys.exc_info()[1]
        raise errors.NetworkError(exc)

    response = gntp.core.parse_gntp(recv_data)
    sock.close()

    logger.debug('From : %s:%s <%s>\n%s',
                 self.hostname, self.port, response.__class__, response)

    if type(response) == gntp.core.GNTPOK:
        return True
    logger.error('Invalid response: %s', response.error())
    return response.error()
def verify_message(data_to_verify, signature, verify_cert):
    """Parse a CMS ASN.1 signature and verify it against the given data.

    :param data_to_verify:
        A byte string of the data to be verified against the signature.
    :param signature: A CMS ASN.1 byte string containing the signature.
    :param verify_cert: The certificate to be used for verifying the signature.
    :return: The digest algorithm that was used in the signature.
    """
    cms_content = cms.ContentInfo.load(signature)
    digest_alg = None

    if cms_content['content_type'].native == 'signed_data':
        for signer in cms_content['content']['signer_infos']:
            signed_attributes = signer['signed_attrs'].copy()

            digest_alg = signer['digest_algorithm']['algorithm'].native
            if digest_alg not in DIGEST_ALGORITHMS:
                raise Exception('Unsupported Digest Algorithm')

            sig_alg = signer['signature_algorithm']['algorithm'].native
            sig = signer['signature'].native
            signed_data = data_to_verify

            if signed_attributes:
                # With signed attributes present, first check the
                # message-digest attribute against the payload, then
                # verify the signature over the re-encoded attributes.
                attr_dict = {}
                for attr in signed_attributes.native:
                    attr_dict[attr['type']] = attr['values']

                message_digest = byte_cls()
                for d in attr_dict['message_digest']:
                    message_digest += d

                digest_func = hashlib.new(digest_alg)
                digest_func.update(data_to_verify)
                calc_message_digest = digest_func.digest()
                if message_digest != calc_message_digest:
                    raise IntegrityError('Failed to verify message signature: '
                                         'Message Digest does not match.')

                signed_data = signed_attributes.untag().dump()

            try:
                if sig_alg == 'rsassa_pkcs1v15':
                    asymmetric.rsa_pkcs1v15_verify(
                        verify_cert, sig, signed_data, digest_alg)
                elif sig_alg == 'rsassa_pss':
                    asymmetric.rsa_pss_verify(
                        verify_cert, sig, signed_data, digest_alg)
                else:
                    raise AS2Exception('Unsupported Signature Algorithm')
            except Exception as e:
                raise IntegrityError(
                    'Failed to verify message signature: {}'.format(e))

    return digest_alg
def dir(base_dir: str, rr_id: str) -> str:
    """Return correct subdirectory of input base dir for artifacts corresponding to input rev reg id.

    :param base_dir: base directory for tails files, thereafter split by cred def id
    :param rr_id: rev reg id
    """
    LOGGER.debug('Tails.dir >>> base_dir: %s, rr_id: %s', base_dir, rr_id)

    if not ok_rev_reg_id(rr_id):
        LOGGER.debug('Tails.dir <!< Bad rev reg id %s', rr_id)
        raise BadIdentifier('Bad rev reg id {}'.format(rr_id))

    result = join(base_dir, rev_reg_id2cred_def_id(rr_id))
    LOGGER.debug('Tails.dir <<< %s', result)
    return result
async def _make_qr(self, qr: QuickRepliesList.BaseOption, request: Request):
    """Generate a single quick reply's content."""
    if isinstance(qr, QuickRepliesList.TextOption):
        return {
            'content_type': 'text',
            'title': await render(qr.text, request),
            'payload': qr.slug,
        }
    if isinstance(qr, QuickRepliesList.LocationOption):
        return {
            'content_type': 'location',
        }
def create_embedded_class(self, method):
    """Build the estimator class.

    Returns
    -------
    :return: string
        The built class as string.
    """
    template = self.temp('embedded.class')
    return template.format(
        class_name=self.class_name,
        method_name=self.method_name,
        method=method,
        n_features=self.n_features)
def describe(df, dtype=None):
    """Print a description of a Pandas dataframe.

    Parameters
    ----------
    df : Pandas.DataFrame
    dtype : dict
        Maps column names to types
    """
    if dtype is None:
        dtype = {}

    print('Number of datapoints: {datapoints}'.format(datapoints=len(df)))
    column_info, column_info_meta = _get_column_info(df, dtype)

    # Describe each family of columns that is actually present.
    if column_info['int']:
        _describe_int(df, column_info)
    if column_info['float']:
        _describe_float(df, column_info)
    if column_info['category']:
        _describe_category(df, column_info, column_info_meta)
    if column_info['time']:
        _describe_time(df, column_info, column_info_meta)
    if column_info['other']:
        _describe_other(df, column_info, column_info_meta)

    column_types = {}
    for column_type, columns in column_info.items():
        # 'other' columns are reported as plain strings
        reported_type = 'str' if column_type == 'other' else column_type
        for column_name in columns:
            column_types[column_name] = reported_type
    return column_types
def get_reservations_for_booking_ids(self, booking_ids):
    """Gets booking information for a given list of booking ids.

    :param booking_ids: a booking id or a list of room ids (comma separated).
    :type booking_ids: string
    :raises APIError: if the underlying request fails.
    """
    # BUG FIX: the original handler was `except resp.exceptions.HTTPError`,
    # but `resp` is still unbound when `_request` raises, so the except
    # clause itself crashed with a NameError (the intent appears to have
    # been requests' HTTPError). Catch the failure and wrap it as APIError.
    try:
        resp = self._request("GET", "/1.1/space/booking/{}".format(booking_ids))
    except Exception as error:
        raise APIError("Server Error: {}".format(error))
    return resp.json()
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_local_interface_name(self, **kwargs):
    """Auto Generated Code"""
    # Build the request document:
    #   <get_lldp_neighbor_detail><output><lldp-neighbor-detail>...
    # (the original also created an unused <config> element that was
    # immediately overwritten; it is omitted here)
    config = ET.Element("get_lldp_neighbor_detail")
    output = ET.SubElement(config, "output")
    detail = ET.SubElement(output, "lldp-neighbor-detail")

    remote_name = ET.SubElement(detail, "remote-interface-name")
    remote_name.text = kwargs.pop('remote_interface_name')
    local_name = ET.SubElement(detail, "local-interface-name")
    local_name.text = kwargs.pop('local_interface_name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def compute_route_time_series(
    feed: "Feed",
    trip_stats_subset: DataFrame,
    dates: List[str],
    freq: str = "5Min",
    *,
    split_directions: bool = False,
) -> DataFrame:
    """Compute route stats in time series form for the trips that lie in
    the trip stats subset and that start on the given dates.

    Parameters
    ----------
    feed : Feed
    trip_stats_subset : DataFrame
        Slice of the output of :func:`.trips.compute_trip_stats`
    dates : string or list
        A YYYYMMDD date string or list thereof indicating the date(s)
        for which to compute stats
    split_directions : boolean
        If ``True``, then separate each route's stats by trip direction;
        otherwise aggregate trips in both directions
    freq : Pandas frequency string
        Specifies the frequency with which to resample the time series;
        max frequency is one minute ('Min')

    Returns
    -------
    DataFrame
        Same format as output by :func:`compute_route_time_series_base`
        but with multiple dates

    Notes
    -----
    - Dates outside of the Feed's date range are excluded; if all dates
      lie outside, an empty DataFrame is returned.
    - See the notes for :func:`compute_route_time_series_base`
    - Assume the feed attributes used in :func:`.trips.get_trips` are
      not ``None``
    - Raise a ValueError if ``split_directions`` and no non-NaN
      direction ID values present
    """
    dates = feed.restrict_dates(dates)
    if not dates:
        return pd.DataFrame()

    activity = feed.compute_trip_activity(dates)
    stats_subset = trip_stats_subset.copy()

    # Collect stats for each date, memoizing by the tuple of active trip
    # IDs so identical service days are computed only once.
    # Maps trip-ID tuple -> [stats DataFrame, list of dates it covers].
    stats_and_dates_by_ids = {}
    null_stats = build_null_route_time_series(
        feed, split_directions=split_directions, freq=freq)
    for date in dates:
        ids = tuple(activity.loc[activity[date] > 0, "trip_id"])
        if ids in stats_and_dates_by_ids:
            # Seen this trip set before; just record the extra date.
            stats_and_dates_by_ids[ids][1].append(date)
        elif not ids:
            # No active trips on this date; use the null series.
            stats_and_dates_by_ids[ids] = [null_stats, [date]]
        else:
            # Compute stats for this trip set.
            subset = stats_subset[stats_subset["trip_id"].isin(ids)].copy()
            stats = compute_route_time_series_base(
                subset,
                split_directions=split_directions,
                freq=freq,
                date_label=date,
            )
            stats_and_dates_by_ids[ids] = [stats, [date]]

    # Assemble one frame per (stats, date) pair, re-stamping each index
    # with the correct calendar date.
    frames = []
    for stats, stat_dates in stats_and_dates_by_ids.values():
        for date in stat_dates:
            frame = stats.copy()
            d = hp.datestr_to_date(date)
            frame.index = frame.index.map(
                lambda t: t.replace(year=d.year, month=d.month, day=d.day))
            frames.append(frame)

    result = pd.concat(frames).sort_index().sort_index(axis=1, sort_remaining=True)

    if len(dates) > 1:
        # Insert missing dates and NaNs to complete the series index.
        end_datetime = pd.to_datetime(dates[-1] + " 23:59:59")
        full_index = pd.date_range(dates[0], end_datetime, freq=freq)
        result = result.reindex(full_index)
    else:
        # Set the frequency explicitly for the single-date case.
        result.index.freq = pd.tseries.frequencies.to_offset(freq)

    return result
def _encode_var ( cls , var ) :
"""Encodes a variable to the appropriate string format for ini files .
: param var : The variable to encode
: return : The ini representation of the variable
: rtype : str""" | if isinstance ( var , str ) :
if any ( _ in var for _ in cls . requires_quotes ) : # NOTE : quoted strings should just use ' " ' according to the spec
return '"' + var . replace ( '"' , '\\"' ) + '"'
return var
else :
return str ( var ) |
def command(self, command):
    """Run a command on the currently active container.

    :rtype: CommandReply
    """
    scoped = '[con_id="{}"] {}'.format(self.id, command)
    return self._conn.command(scoped)
def _run_info_from_yaml(dirs, run_info_yaml, config, sample_names=None, is_cwl=False, integrations=None):
    """Read run information from a passed YAML file.

    Parses the sample YAML, applies global configuration and variables,
    then normalizes each sample item: lane, description (made R-safe when
    needed), upload target, algorithm defaults, absolute input file
    paths, read group names, resources and integration settings.

    :param dirs: dict of pipeline directories (may contain "flowcell", "work")
    :param run_info_yaml: path to the YAML sample description file
    :param config: global configuration dictionary
    :param sample_names: optional list restricting processing to samples
        whose description matches
    :param is_cwl: whether preparing a CWL run (changes upload handling)
    :param integrations: optional mapping of integration name -> retriever
        used to resolve remote inputs
    :return: list of normalized per-sample dictionaries
    """
    # Bug fix: normalize so the `.values()` iterations below work when no
    # integrations mapping is passed (the default None previously raised
    # AttributeError on integrations.values()).
    integrations = integrations or {}
    validate_yaml(run_info_yaml, run_info_yaml)
    with open(run_info_yaml) as in_handle:
        loaded = yaml.safe_load(in_handle)
    fc_name, fc_date = None, None
    if dirs.get("flowcell"):
        try:
            fc_name, fc_date = flowcell.parse_dirname(dirs.get("flowcell"))
        except ValueError:
            pass
    global_config = {}
    global_vars = {}
    resources = {}
    integration_config = {}
    if isinstance(loaded, dict):
        global_config = copy.deepcopy(loaded)
        del global_config["details"]
        if "fc_name" in loaded:
            fc_name = loaded["fc_name"].replace(" ", "_")
        if "fc_date" in loaded:
            fc_date = str(loaded["fc_date"]).replace(" ", "_")
        global_vars = global_config.pop("globals", {})
        resources = global_config.pop("resources", {})
        for iname in ["arvados"]:
            integration_config[iname] = global_config.pop(iname, {})
        loaded = loaded["details"]
    if sample_names:
        loaded = [x for x in loaded if x["description"] in sample_names]
    if integrations:
        for iname, retriever in integrations.items():
            if iname in config:
                config[iname] = retriever.set_cache(config[iname])
                loaded = retriever.add_remotes(loaded, config[iname])
    run_details = []
    for i, item in enumerate(loaded):
        item = _normalize_files(item, dirs.get("flowcell"))
        if "lane" not in item:
            item["lane"] = str(i + 1)
        item["lane"] = _clean_characters(item["lane"])
        if "description" not in item:
            if _item_is_bam(item):
                item["description"] = get_sample_name(item["files"][0])
            else:
                raise ValueError("No `description` sample name provided for input #%s" % (i + 1))
        description = _clean_characters(item["description"])
        item["description"] = description
        # make names R safe if we are likely to use R downstream
        if item["analysis"].lower() in R_DOWNSTREAM_ANALYSIS:
            if description[0].isdigit():
                valid = "X" + description
                logger.info("%s is not a valid R name, converting to %s." % (description, valid))
                item["description"] = valid
        if "upload" not in item and not is_cwl:
            upload = global_config.get("upload", {})
            # Handle specifying a local directory directly in upload
            if isinstance(upload, six.string_types):
                upload = {"dir": upload}
            if not upload:
                upload["dir"] = "../final"
            if fc_name:
                upload["fc_name"] = fc_name
            if fc_date:
                upload["fc_date"] = fc_date
            upload["run_id"] = ""
            if upload.get("dir"):
                upload["dir"] = _file_to_abs(upload["dir"], [dirs.get("work")], makedir=True)
            item["upload"] = upload
        item["algorithm"] = _replace_global_vars(item["algorithm"], global_vars)
        item["algorithm"] = genome.abs_file_paths(
            item["algorithm"], ignore_keys=ALGORITHM_NOPATH_KEYS,
            fileonly_keys=ALGORITHM_FILEONLY_KEYS,
            do_download=all(not x for x in integrations.values()))
        item["genome_build"] = str(item.get("genome_build", ""))
        item["algorithm"] = _add_algorithm_defaults(item["algorithm"], item.get("analysis", ""), is_cwl)
        item["metadata"] = add_metadata_defaults(item.get("metadata", {}))
        item["rgnames"] = prep_rg_names(item, config, fc_name, fc_date)
        if item.get("files"):
            item["files"] = [genome.abs_file_paths(f, do_download=all(not x for x in integrations.values()))
                             for f in item["files"]]
        elif "files" in item:
            del item["files"]
        if item.get("vrn_file") and isinstance(item["vrn_file"], six.string_types):
            item["vrn_file"] = genome.abs_file_paths(
                item["vrn_file"], do_download=all(not x for x in integrations.values()))
            if os.path.isfile(item["vrn_file"]):
                # Try to prepare in place (or use ready to go inputs)
                try:
                    item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config, remove_orig=False)
                # In case of permission errors, fix in inputs directory
                except IOError:
                    inputs_dir = utils.safe_makedir(os.path.join(dirs.get("work", os.getcwd()), "inputs",
                                                                 item["description"]))
                    item["vrn_file"] = vcfutils.bgzip_and_index(item["vrn_file"], config, remove_orig=False,
                                                                out_dir=inputs_dir)
            if not tz.get_in(("metadata", "batch"), item) and tz.get_in(["algorithm", "validate"], item):
                raise ValueError("%s: Please specify a metadata batch for variant file (vrn_file) input.\n" % (item["description"]) +
                                 "Batching with a standard sample provides callable regions for validation.")
        item = _clean_metadata(item)
        item = _clean_algorithm(item)
        item = _organize_tools_on(item, is_cwl)
        item = _clean_background(item)
        # Add any global resource specifications
        if "resources" not in item:
            item["resources"] = {}
        for prog, pkvs in resources.items():
            if prog not in item["resources"]:
                item["resources"][prog] = {}
            if pkvs is not None:
                for key, val in pkvs.items():
                    item["resources"][prog][key] = val
        for iname, ivals in integration_config.items():
            if ivals:
                if iname not in item:
                    item[iname] = {}
                for k, v in ivals.items():
                    item[iname][k] = v
        run_details.append(item)
    _check_sample_config(run_details, run_info_yaml, config)
    return run_details
def get_port(self, adapter_number, port_number):
    """Look up the port matching *adapter_number* and *port_number*.

    :return: the matching port, or ``None`` if no port matches
    """
    matches = (
        candidate for candidate in self.ports
        if candidate.adapter_number == adapter_number
        and candidate.port_number == port_number
    )
    return next(matches, None)
def connect_after(self, detailed_signal, handler, *args):
    """connect_after(detailed_signal: str, handler: function, *args) -> handler_id: int

    Like connect(), except the handler is appended to the signal handler
    list *after* the default class signal handler. Handler definition and
    invocation are otherwise identical.
    """
    return self.__connect(GConnectFlags.CONNECT_AFTER,
                          detailed_signal, handler, *args)
def is_prime(n):
    """Return True if n is (probably) prime, False otherwise.

    We use the Miller-Rabin test, as given in Menezes et al. p. 138.
    This test is not exact: there are composite values n for which
    it returns True.

    In testing the odd numbers from 1000001 to 199999,
    about 66 composites got past the first test,
    5 got past the second test, and none got past the third.
    Since factors of 2, 3, 5, 7, and 11 were detected during
    preliminary screening, the number of numbers tested by
    Miller-Rabin was (199999 - 1000001)*(2/3)*(4/5)*(6/7)
    = 4.57 million.
    """
    # (This is used to study the risk of false positives:)
    global miller_rabin_test_count
    miller_rabin_test_count = 0
    # Small values are answered exactly from the precomputed prime table.
    if n <= smallprimes[-1]:
        if n in smallprimes:
            return True
        else:
            return False
    # Cheap screen: a shared factor with 2*3*5*7*11 means composite.
    if gcd(n, 2 * 3 * 5 * 7 * 11) != 1:
        return False
    # Choose a number of iterations sufficient to reduce the
    # probability of accepting a composite below 2**-80
    # (from Menezes et al. Table 4.4):
    t = 40
    n_bits = 1 + int(math.log(n, 2))
    # Larger n needs fewer rounds for the same error bound; pick the
    # smallest round count whose bit-size threshold n still exceeds.
    for k, tt in ((100, 27), (150, 18), (200, 15), (250, 12), (300, 9),
                  (350, 8), (400, 7), (450, 6), (550, 5), (650, 4),
                  (850, 3), (1300, 2),):
        if n_bits < k:
            break
        t = tt
    # Run the test t times:
    # Decompose n - 1 as 2**s * r with r odd.
    s = 0
    r = n - 1
    while (r % 2) == 0:
        s = s + 1
        r = r // 2
    for i in range(t):
        # Successive small primes serve as the Miller-Rabin bases.
        a = smallprimes[i]
        y = modular_exp(a, r, n)
        if y != 1 and y != n - 1:
            j = 1
            while j <= s - 1 and y != n - 1:
                y = modular_exp(y, 2, n)
                if y == 1:
                    # Nontrivial square root of 1 found: n is composite.
                    miller_rabin_test_count = i + 1
                    return False
                j = j + 1
            if y != n - 1:
                miller_rabin_test_count = i + 1
                return False
    return True
def usesTime(self, fmt=None):
    '''Check if the format uses the creation time of the record.

    Falls back to this formatter's own format string when *fmt* is not
    given; non-string formats are unwrapped via their first element.
    '''
    active_fmt = self._fmt if fmt is None else fmt
    if not isinstance(active_fmt, basestring):
        active_fmt = active_fmt[0]
    return '%(asctime)' in active_fmt
def init_process(self) -> None:
    """GunicornWorker initialization callback.

    Ensures this worker owns a usable asyncio event loop before the base
    class finishes process setup: when the default loop is already
    running (e.g. inherited from the master process), a fresh loop is
    created and installed as the default; otherwise the existing default
    loop is adopted.
    """
    default_loop = asyncio.get_event_loop()
    if default_loop.is_running():
        # NOTE(review): asyncio's loop.close() raises RuntimeError when
        # called on a *running* loop in CPython -- confirm this branch is
        # reachable/intended with the event loop policy in use.
        default_loop.close()
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
    else:
        self.loop = default_loop
    # Let the Gunicorn base worker complete its standard initialization.
    super().init_process()
def list_delete(self, id):
    """Delete a list."""
    list_id = self.__unpack_id(id)
    endpoint = '/api/v1/lists/{0}'.format(list_id)
    self.__api_request('DELETE', endpoint)
def modify(self, **kwargs):
    """Modify the rule, first enforcing the custom exclusive-parameter check."""
    self._check_exclusive_parameters(**kwargs)
    result = super(Rule, self)._modify(**kwargs)
    return result
def get_nodes(self, request):
    """Build the menu tree.

    Emits one navigation node per current, visible scientist and, when
    any non-current visible scientists exist, a trailing "Lab Alumni"
    entry linking to the alumni section of the scientist list.
    """
    nodes = []
    for scientist in Scientist.objects.filter(current=True, visible=True):
        node = NavigationNode(
            scientist.full_name,
            reverse('lab_members:scientist_detail', args=(scientist.slug,)),
            scientist.slug)
        nodes.append(node)
    # Bug fix: the original reused the loop variable `scientist` here,
    # which raised NameError when no current scientists existed and
    # duplicated an existing node id otherwise; use a dedicated id.
    if Scientist.objects.filter(current=False, visible=True):
        node = NavigationNode(
            'Lab Alumni',
            reverse('lab_members:scientist_list') + '#lab-alumni',
            'lab-alumni')
        nodes.append(node)
    return nodes
def api_ebuio_forum_get_topics_by_tag_for_user(request, key=None, hproPk=None, tag=None, userPk=None):
    """Return the list of topics using the tag pk.

    Validates the API key, resolves the requesting user (falling back to
    an anonymous user), checks display rights on the project, then
    returns the project's non-deleted topic posts carrying *tag* as a
    JSON payload under the "data" key.
    """
    # Check API key (in order to be sure that we have a valid one and that's correspond to the project
    if not check_api_key(request, key, hproPk):
        # Bug fix: return an instantiated response, not the class object.
        return HttpResponseForbidden()
    if settings.PIAPI_STANDALONE:
        return HttpResponse(json.dumps({'error': 'no-on-ebuio'}), content_type="application/json")
    # We get the plugit object representing the project
    (_, _, hproject) = getPlugItObject(hproPk)
    # We get the user and we check his rights
    author_pk = request.GET.get('u')
    if author_pk and author_pk.isdigit():
        try:
            from users.models import TechUser
            user = TechUser.objects.get(pk=author_pk)
        except TechUser.DoesNotExist:
            # Unknown user id: fall back to an anonymous user (the dead
            # `error = 'user-no-found'` assignment was removed).
            user = generate_user(mode='ano')
    else:
        user = generate_user(mode='ano')
    if not hproject.discuss_can_display_posts(user):
        # Bug fix: return an instantiated response, not the class object.
        return HttpResponseForbidden()
    # Verify the existence of the tag
    if not tag:
        raise Http404
    # We get the posts (only topics ones - the parent) related to the project and to the tag.
    # We dont' take the deleted ones.
    from discuss.models import Post
    posts = Post.objects.filter(is_deleted=False).filter(object_id=hproPk).filter(tags__tag=tag).order_by('-when')
    # We convert the posts list to json
    posts_json = [{'id': post.id,
                   'link': post.discuss_get_forum_topic_link(),
                   'subject': post.title,
                   'author': post.who_id,
                   'when': post.when.strftime('%a, %d %b %Y %H:%M GMT'),
                   'score': post.score,
                   'replies_number': post.direct_subposts_size()} for post in posts]
    return HttpResponse(json.dumps({'data': posts_json}), content_type="application/json")
def set_options(self, options):
    """Synchronize this basket line's option rows with *options*.

    options = [{
        'product_option': instance of ProductFinalOption,
        'product_final': instance of ProductFinal,
        'quantity': Float
    }]

    Rows already present for a given product_option are updated in place
    and saved only when something actually changed; entries without a
    matching row create a new SalesLineBasketOption. All work happens
    inside one database transaction.
    """
    with transaction.atomic():
        for entry in options:
            existing = self.line_basket_option_sales.filter(
                product_option=entry['product_option']).first()
            if existing:
                # edit: persist only when a field really changed
                dirty = False
                if existing.quantity != entry['quantity']:
                    existing.quantity = entry['quantity']
                    dirty = True
                if existing.product_final != entry['product_final']:
                    existing.product_final = entry['product_final']
                    dirty = True
                if dirty:
                    existing.save()
            else:
                # new option row for this basket line
                fresh = SalesLineBasketOption()
                fresh.line_budget = SalesLineBasket.objects.get(pk=self.pk)
                fresh.product_option = entry['product_option']
                fresh.product_final = entry['product_final']
                fresh.quantity = entry['quantity']
                fresh.save()
def _name_messages_complete ( self ) :
"""Check if all name messages have been received""" | for channel in range ( 1 , self . number_of_channels ( ) + 1 ) :
try :
for name_index in range ( 1 , 4 ) :
if not isinstance ( self . _name_data [ channel ] [ name_index ] , str ) :
return False
except Exception :
return False
return True |
def print_context_info(self):
    """Prints moderngl context info."""
    print("Context Version:")
    details = (
        ('ModernGL:', moderngl.__version__),
        ('vendor:', self.ctx.info['GL_VENDOR']),
        ('renderer:', self.ctx.info['GL_RENDERER']),
        ('version:', self.ctx.info['GL_VERSION']),
        ('python:', sys.version),
        ('platform:', sys.platform),
        ('code:', self.ctx.version_code),
    )
    for label, value in details:
        print(label, value)
def close(self):
    """Closes this VPCS VM.

    Releases the UDP ports held by the adapter NIO and the local uBridge
    tunnel, stops the uBridge hypervisor and terminates the VPCS process
    if still running.

    :returns: False when the base class refuses to close, True otherwise
    """
    if not (yield from super().close()):
        return False
    nio = self._ethernet_adapter.get_nio(0)
    if isinstance(nio, NIOUDP):
        # Return the NIO's UDP port to the project's port pool.
        self.manager.port_manager.release_udp_port(nio.lport, self._project)
    if self._local_udp_tunnel:
        # Both endpoints of the local uBridge tunnel hold a UDP port.
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[0].lport, self._project)
        self.manager.port_manager.release_udp_port(self._local_udp_tunnel[1].lport, self._project)
        self._local_udp_tunnel = None
    yield from self._stop_ubridge()
    if self.is_running():
        self._terminate_process()
    return True
def resolve_json_id(self, json_id, allow_no_match=False):
    """Given an id found in scraped JSON, return a DB id for the object.

    params:
        json_id: id from json (pseudo ids start with '~')
    allow_no_match: just return None if id can't be resolved
    returns:
        database id (or None for falsy input / unresolvable pseudo ids
        when allow_no_match is set)
    raises:
        UnresolvedIdError if id couldn't be resolved
    """
    if not json_id:
        return None
    if json_id.startswith('~'):
        # keep caches of all the pseudo-ids to avoid doing 1000s of lookups during import
        if json_id not in self.pseudo_id_cache:
            spec = get_pseudo_id(json_id)
            spec = self.limit_spec(spec)
            # A Q object is passed positionally; a plain dict becomes
            # keyword filters.
            if isinstance(spec, Q):
                objects = self.model_class.objects.filter(spec)
            else:
                objects = self.model_class.objects.filter(**spec)
            ids = {each.id for each in objects}
            if len(ids) == 1:
                self.pseudo_id_cache[json_id] = ids.pop()
                errmsg = None
            elif not ids:
                errmsg = 'cannot resolve pseudo id to {}: {}'.format(
                    self.model_class.__name__, json_id)
            else:
                errmsg = 'multiple objects returned for {} pseudo id {}: {}'.format(
                    self.model_class.__name__, json_id, ids)
            # either raise or log error
            if errmsg:
                if not allow_no_match:
                    raise UnresolvedIdError(errmsg)
                else:
                    self.error(errmsg)
                    # Cache the failure too so repeated lookups stay cheap.
                    self.pseudo_id_cache[json_id] = None
        # return the cached object
        return self.pseudo_id_cache[json_id]
    # get the id that the duplicate points to, or use self
    json_id = self.duplicates.get(json_id, json_id)
    try:
        return self.json_to_db_id[json_id]
    except KeyError:
        raise UnresolvedIdError('cannot resolve id: {}'.format(json_id))
def make_model(self, grounding_ontology='UN', grounding_threshold=None):
    """Return a networkx MultiDiGraph representing a causal analysis graph.

    Parameters
    ----------
    grounding_ontology : Optional[str]
        The ontology from which the grounding should be taken
        (e.g. UN, FAO)
    grounding_threshold : Optional[float]
        Minimum threshold score for Eidos grounding.

    Returns
    -------
    nx.MultiDiGraph
        The assembled CAG.
    """
    # NOTE(review): grounding_threshold is stored only when explicitly
    # given, so a previously-set attribute value persists otherwise.
    if grounding_threshold is not None:
        self.grounding_threshold = grounding_threshold
    self.grounding_ontology = grounding_ontology
    # Filter to Influence Statements which are currently supported
    statements = [stmt for stmt in self.statements if isinstance(stmt, Influence)]
    # Initialize graph
    self.CAG = nx.MultiDiGraph()
    # Add nodes and edges to the graph
    for s in statements:
        # Get standardized name of subject and object
        # subj, obj = (self._node_name(s.subj), self._node_name(s.obj))
        # See if both subject and object have polarities given
        has_both_polarity = (s.subj.delta['polarity'] is not None and
                             s.obj.delta['polarity'] is not None)
        # Add the nodes to the graph
        for node, delta in zip((s.subj.concept, s.obj.concept),
                               (s.subj.delta, s.obj.delta)):
            self.CAG.add_node(self._node_name(node),
                              simulable=has_both_polarity,
                              mods=delta['adjectives'])
        # Edge is solid if both nodes have polarity given
        linestyle = 'solid' if has_both_polarity else 'dotted'
        if has_both_polarity:
            same_polarity = (s.subj.delta['polarity'] == s.obj.delta['polarity'])
            # Matching polarities render green circle heads; opposing
            # polarities render maroon tee heads.
            if same_polarity:
                target_arrow_shape, linecolor = ('circle', 'green')
            else:
                target_arrow_shape, linecolor = ('tee', 'maroon')
        else:
            target_arrow_shape, linecolor = ('triangle', 'maroon')
        # Add edge to the graph with metadata from statement
        provenance = []
        if s.evidence:
            provenance = s.evidence[0].annotations.get('provenance', [])
            if provenance:
                provenance[0]['text'] = s.evidence[0].text
        self.CAG.add_edge(self._node_name(s.subj.concept),
                          self._node_name(s.obj.concept),
                          subj_polarity=s.subj.delta['polarity'],
                          subj_adjectives=s.subj.delta['adjectives'],
                          obj_polarity=s.obj.delta['polarity'],
                          obj_adjectives=s.obj.delta['adjectives'],
                          linestyle=linestyle,
                          linecolor=linecolor,
                          targetArrowShape=target_arrow_shape,
                          provenance=provenance,)
    return self.CAG
def qualname(self) -> str:
    """Return the fully qualified name of the class-under-construction
    when a module is known, otherwise just the bare class name."""
    if not self.module:
        return self.name
    return '{}.{}'.format(self.module, self.name)
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
    """Compare two aligned sequences codon by codon and collect mutations.

    This function takes two aligned sequences (subject and query), and
    the position on the subject where the alignment starts. The sequences
    are compared codon by codon. If a mismatch is found it is saved in
    'mis_matches'. If a gap is found the function get_inframe_gap is used
    to find the indel sequence and keep the sequence in the correct
    reading frame. The function name_indel_mutation is used to name indel
    mutations and translate the indels to amino acids.

    Returns a list of entries containing all information needed about
    each mutation in order to look it up in the known-mutation database
    dict and to report it in the user-facing output files.
    """
    mis_matches = []
    # Find start pos of first codon in frame, i_start
    codon_offset = (sbjct_start - 1) % 3
    i_start = 0
    if codon_offset != 0:
        i_start = 3 - codon_offset
    sbjct_start = sbjct_start + i_start
    # Set sequences in frame
    sbjct_seq = sbjct_seq[i_start:]
    qry_seq = qry_seq[i_start:]
    # Find codon number of the first codon in the sequence, start at 0
    codon_no = int((sbjct_start - 1) / 3)
    # 1,2,3 start on 0
    # s_shift and q_shift are used when gaps appears
    q_shift = 0
    s_shift = 0
    mut_no = 0
    # Find inserts and deletions in sequence
    indel_no = 0
    indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
    # Go through sequence and save mutations when found
    for index in range(0, len(sbjct_seq), 3):
        # Count codon number
        codon_no += 1
        # Shift index according to gaps
        s_i = index + s_shift
        q_i = index + q_shift
        # Get codons
        sbjct_codon = sbjct_seq[s_i:s_i + 3]
        qry_codon = qry_seq[q_i:q_i + 3]
        # NOTE(review): `qry_codon[q_i:]` slices a 3-char codon with a
        # sequence-level index (usually yielding "") -- presumably
        # `qry_seq[q_i:]` was intended; confirm before changing.
        if len(sbjct_seq[s_i:].replace("-", "")) + len(qry_codon[q_i:].replace("-", "")) < 6:
            break
        # Check for mutations
        if sbjct_codon.upper() != qry_codon.upper():
            # Check for codon insertions and deletions and frameshift mutations
            if "-" in sbjct_codon or "-" in qry_codon:
                # Get indel info
                try:
                    indel_data = indels[indel_no]
                except IndexError:
                    # NOTE(review): debug path -- `gene` is not defined in
                    # this scope (NameError) and `indel_data` is unbound
                    # when the very first lookup fails; confirm intent.
                    print(sbjct_codon, qry_codon)
                    print(indels)
                    print(gene, indel_data, indel_no)
                mut = indel_data[0]
                codon_no_indel = indel_data[1]
                seq_pos = indel_data[2] + sbjct_start - 1
                indel = indel_data[3]
                indel_no += 1
                # Get the affected sequence in frame for both for sbjct and qry
                if mut == "ins":
                    sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
                    qry_rf_indel = get_inframe_gap(
                        qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel) / 3) * 3))
                else:
                    qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
                    sbjct_rf_indel = get_inframe_gap(
                        sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel) / 3) * 3))
                mut_name, aa_ref, aa_alt = name_indel_mutation(
                    sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
                # Set index to the correct reading frame after the indel gap
                shift_diff_before = abs(s_shift - q_shift)
                s_shift += len(sbjct_rf_indel) - 3
                q_shift += len(qry_rf_indel) - 3
                shift_diff = abs(s_shift - q_shift)
                # A net shift that is a multiple of 3 after a prior shift
                # means the reading frame was restored by this indel.
                if shift_diff_before != 0 and shift_diff % 3 == 0:
                    if s_shift > q_shift:
                        nucs_needed = int((len(sbjct_rf_indel) / 3) * 3) + shift_diff
                        pre_qry_indel = qry_rf_indel
                        qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
                        q_shift += len(qry_rf_indel) - len(pre_qry_indel)
                    elif q_shift > s_shift:
                        nucs_needed = int((len(qry_rf_indel) / 3) * 3) + shift_diff
                        pre_sbjct_indel = sbjct_rf_indel
                        sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
                        s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
                    mut_name, aa_ref, aa_alt = name_indel_mutation(
                        sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
                    if "Frameshift" in mut_name:
                        mut_name = mut_name.split("-")[0] + "- Frame restored"
                mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name,
                                 sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
                # Check if the next mutation in the indels list is in the current codon
                # Find the number of individul gaps in the evaluated sequence
                no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
                if no_of_indels > 1:
                    for j in range(indel_no, indel_no + no_of_indels - 1):
                        try:
                            indel_data = indels[j]
                        except IndexError:
                            sys.exit("indel_data list is out of range, bug!")
                        mut = indel_data[0]
                        codon_no_indel = indel_data[1]
                        seq_pos = indel_data[2] + sbjct_start - 1
                        indel = indel_data[3]
                        indel_no += 1
                        mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name,
                                         sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
                # Set codon number, and save nucleotides from out of frame mutations
                if mut == "del":
                    codon_no += int((len(sbjct_rf_indel) - 3) / 3)
                # If evaluated insert is only gaps codon_no should not increment
                elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
                    codon_no -= 1
            # Check of point mutations
            else:
                mut = "sub"
                aa_ref = aa(sbjct_codon)
                aa_alt = aa(qry_codon)
                if aa_ref != aa_alt:
                    # End search for mutation if a premature stop codon is found
                    mut_name = "p." + aa_ref + str(codon_no) + aa_alt
                    mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name,
                                     sbjct_codon, qry_codon, aa_ref, aa_alt]]
            # If a Premature stop codon occur report it an stop the loop
            try:
                if mis_matches[-1][-1] == "*":
                    mut_name += " - Premature stop codon"
                    mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
                    break
            except IndexError:
                pass
    # Sort mutations on position
    mis_matches = sorted(mis_matches, key=lambda x: x[1])
    return mis_matches
def save_profile(self, userdata, data):
    """Save user profile modifications.

    Handles, in order: claiming a username (only when none is set yet),
    changing the password, switching the interface language, and
    updating the real name. Username and password changes return
    immediately; language/name updates fall through to a final
    "Profile updated." message.

    :param userdata: current user record from the database
    :param data: submitted form values
    :return: tuple (result record, user-facing message, error flag)
    """
    result = userdata
    error = False
    # Check if updating username.
    if not userdata["username"] and "username" in data:
        if re.match(r"^[-_|~0-9A-Z]{4,}$", data["username"], re.IGNORECASE) is None:
            error = True
            msg = _("Invalid username format.")
        elif self.database.users.find_one({"username": data["username"]}):
            error = True
            msg = _("Username already taken")
        else:
            result = self.database.users.find_one_and_update(
                {"email": userdata["email"]},
                {"$set": {"username": data["username"]}},
                return_document=ReturnDocument.AFTER)
            if not result:
                error = True
                msg = _("Incorrect email.")
            else:
                # Re-establish the session under the newly set username.
                self.user_manager.connect_user(result["username"], result["realname"],
                                               result["email"], result["language"])
                msg = _("Profile updated.")
        return result, msg, error
    # Check if updating the password.
    if self.app.allow_registration and len(data["passwd"]) in range(1, 6):
        error = True
        msg = _("Password too short.")
        return result, msg, error
    elif self.app.allow_registration and len(data["passwd"]) > 0 and data["passwd"] != data["passwd2"]:
        error = True
        msg = _("Passwords don't match !")
        return result, msg, error
    elif self.app.allow_registration and len(data["passwd"]) >= 6:
        oldpasswd_hash = hashlib.sha512(data["oldpasswd"].encode("utf-8")).hexdigest()
        passwd_hash = hashlib.sha512(data["passwd"].encode("utf-8")).hexdigest()
        match = {"username": self.user_manager.session_username()}
        # Only require the old password when one is already stored.
        if "password" in userdata:
            match["password"] = oldpasswd_hash
        result = self.database.users.find_one_and_update(
            match, {"$set": {"password": passwd_hash}},
            return_document=ReturnDocument.AFTER)
        if not result:
            error = True
            msg = _("Incorrect old password.")
            return result, msg, error
    # Check if updating language
    if data["language"] != userdata["language"]:
        # Unknown languages fall back to English.
        language = data["language"] if data["language"] in self.app.available_languages else "en"
        result = self.database.users.find_one_and_update(
            {"username": self.user_manager.session_username()},
            {"$set": {"language": language}},
            return_document=ReturnDocument.AFTER)
        if not result:
            error = True
            msg = _("Incorrect username.")
            return result, msg, error
        else:
            self.user_manager.set_session_language(language)
    # Checks if updating name
    if len(data["realname"]) > 0:
        result = self.database.users.find_one_and_update(
            {"username": self.user_manager.session_username()},
            {"$set": {"realname": data["realname"]}},
            return_document=ReturnDocument.AFTER)
        if not result:
            error = True
            msg = _("Incorrect username.")
            return result, msg, error
        else:
            self.user_manager.set_session_realname(data["realname"])
    else:
        error = True
        msg = _("Name is too short.")
        return result, msg, error
    msg = _("Profile updated.")
    return result, msg, error
def add_attribute(self, attrkey, attrvalue, append=False, oldvalue=None):
    """Add an attribute to this feature.

    Feature attributes are stored as nested dictionaries.

    Each feature can only have one ID, so ID attribute mapping is 'string'
    to 'string'. All other attributes can have multiple values, so mapping
    is 'string' to 'dict of strings'.

    By default, adding an attribute that already exists will cause the old
    value to be overwritten. If the `append` option is true, the new
    attribute value will not overwrite the old value, but will be appended
    as a second value. (Note: ID attributes can have only 1 value.)

    If the `oldvalue` option is set, the new value will replace the old
    value. This is necessary for updating an attribute that has multiple
    values without completely overwriting all old values. (Note: The
    `append` option is ignored when `oldvalue` is set.)
    """
    # Handle ID/Parent relationships
    if attrkey == 'ID':
        if self.children is not None:
            # Keep child Parent attributes in sync with the new ID.
            oldid = self.get_attribute('ID')
            for child in self.children:
                child.add_attribute('Parent', attrvalue, oldvalue=oldid)
        self._attrs[attrkey] = attrvalue
        if self.is_multi:
            # Multi-feature groups share a single ID across all members.
            self.multi_rep._attrs[attrkey] = attrvalue
            for sibling in self.multi_rep.siblings:
                sibling._attrs[attrkey] = attrvalue
        return
    # Handle all other attribute types
    if oldvalue is not None:
        if attrkey in self._attrs:
            assert oldvalue in self._attrs[attrkey]
            del self._attrs[attrkey][oldvalue]
    # Reset the value dict unless appending to an existing attribute.
    if attrkey not in self._attrs or append is False:
        self._attrs[attrkey] = dict()
    self._attrs[attrkey][attrvalue] = True
def main():
    '''Main routine.

    Deploys an Azure Resource Manager template (referenced by URI) into a
    resource group, optionally generating random values for selected
    parameters, and optionally waiting for provisioning to finish while
    timing it. Credentials are read from azurermconfig.json in the
    current folder.
    '''
    # validate command line arguments
    argparser = argparse.ArgumentParser()
    argparser.add_argument('--uri', '-u', required=True, action='store', help='Template URI')
    argparser.add_argument('--params', '-f', required=True, action='store', help='Parameters json file')
    argparser.add_argument('--location', '-l', required=True, action='store', help='Location, e.g. eastus')
    argparser.add_argument('--rg', '-g', required=False, action='store', help='Resource Group name')
    argparser.add_argument('--sub', '-s', required=False, action='store', help='Subscription ID')
    argparser.add_argument('--genparams', '-p', required=False, action='store',
                           help='Comma separated list of parameters to generate strings for')
    argparser.add_argument('--wait', '-w', required=False, action='store_true', default=False,
                           help='Wait for deployment to complete and time it')
    argparser.add_argument('--debug', '-d', required=False, action='store_true', default=False,
                           help='Debug mode: print additional deployment')
    args = argparser.parse_args()
    template_uri = args.uri
    params = args.params
    rgname = args.rg
    location = args.location
    subscription_id = args.sub
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as configfile:
            configdata = json.load(configfile)
    except FileNotFoundError:
        sys.exit('Error: Expecting azurermconfig.json in current folder')
    tenant_id = configdata['tenantId']
    app_id = configdata['appId']
    app_secret = configdata['appSecret']
    if subscription_id is None:
        subscription_id = configdata['subscriptionId']
    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # load parameters file
    try:
        with open(params) as params_file:
            param_data = json.load(params_file)
    except FileNotFoundError:
        sys.exit('Error: Expecting ' + params + ' in current folder')
    # prep Haikunator
    haikunator = Haikunator()
    # if there is a genparams argument generate values and merge the list
    if args.genparams is not None:
        newdict = {}
        genlist = args.genparams.split(',')
        for param in genlist:
            # generate a random phrase, include caps and puncs in case it's a passwd
            newval = haikunator.haikunate(delimiter='-').title()
            newdict[param] = {'value': newval}
        # note: `params` is rebound here from the file name to the merged dict
        params = {**param_data, **newdict}
    else:
        params = param_data
    # create resource group if not specified
    if rgname is None:
        rgname = haikunator.haikunate()
        ret = azurerm.create_resource_group(access_token, subscription_id, rgname, location)
        print('Creating resource group: ' + rgname + ', location:', location + ', return code:', ret)
    deployment_name = haikunator.haikunate()
    # measure time from beginning of deployment call (after creating resource group etc.)
    start_time = time.time()
    # deploy template and print response
    deploy_return = azurerm.deploy_template_uri(
        access_token, subscription_id, rgname, deployment_name, template_uri, params)
    print('Deployment name: ' + deployment_name + ', return code:', deploy_return)
    if 'Response [20' not in str(deploy_return):
        print('Return from deployment: ', deploy_return.text)
        sys.exit('Deployment failed. Exiting.. ')
    if args.debug is True:
        print(json.dumps(deploy_return.json(), sort_keys=False, indent=2, separators=(',', ': ')))
    # show deployment status
    if args.debug is True:
        print('Deployment status:')
        deploy_return = azurerm.show_deployment(access_token, subscription_id, rgname, deployment_name)
        print(json.dumps(deploy_return, sort_keys=False, indent=2, separators=(',', ': ')))
    # wait for deployment to complete
    if args.wait is True:
        print('Waiting for provisioning to complete..')
        provisioning_state = ''
        try:
            while True:
                time.sleep(10)
                deploy_return = azurerm.show_deployment(access_token, subscription_id, rgname, deployment_name)
                provisioning_state = deploy_return['properties']['provisioningState']
                if provisioning_state != 'Running':
                    break
            print('Provisioning state:', provisioning_state)
        except KeyError:
            # Response lacked the expected keys - report the raw payload.
            print('Deployment failure:', deploy_return)
        elapsed_time = time.time() - start_time
        print('Elapsed time:', elapsed_time)
def writeConfig(self):
    """Persist the value of the :attr:`AbstractJobStore.config` attribute to
    the job store, so that it can be retrieved later by other instances of
    this class."""
    stream_ctx = self.writeSharedFileStream('config.pickle', isProtected=False)
    with stream_ctx as stream:
        pickle.dump(self.__config, stream, pickle.HIGHEST_PROTOCOL)
def init_db():
    """Initialize a new database with the default tables for chill.

    Creates the following tables: Chill, Node, Node_Node, Route, Query,
    Template.
    """
    with current_app.app_context():
        # Each listed file contains one CREATE TABLE statement.
        for create_table_file in CHILL_CREATE_TABLE_FILES:
            db.execute(text(fetch_query_string(create_table_file)))
def init_with_keytab(self):
    """Initialize credential cache with keytab.

    Acquires initiate-usage GSSAPI credentials for the configured principal.
    Non-default keytab/ccache locations from ``self._cleaned_options`` are
    passed via the credential ``store``; if the cached credential has
    expired, a fresh one is acquired into a temporary ccache and then stored
    back to the originally requested (or default) ccache.
    """
    creds_opts = {
        'usage': 'initiate',
        'name': self._cleaned_options['principal'],
    }
    store = {}
    # Only mention non-default locations; anything absent from the store
    # dict falls back to the system defaults.
    if self._cleaned_options['keytab'] != DEFAULT_KEYTAB:
        store['client_keytab'] = self._cleaned_options['keytab']
    if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
        store['ccache'] = self._cleaned_options['ccache']
    if store:
        creds_opts['store'] = store
    creds = gssapi.creds.Credentials(**creds_opts)
    try:
        # Accessing .lifetime forces validation of the cached credential;
        # the value itself is not needed.
        creds.lifetime
    except gssapi.exceptions.ExpiredCredentialsError:
        new_creds_opts = copy.deepcopy(creds_opts)
        # Get new credential and put it into a temporary ccache
        if 'store' in new_creds_opts:
            new_creds_opts['store']['ccache'] = _get_temp_ccache()
        else:
            new_creds_opts['store'] = {'ccache': _get_temp_ccache()}
        creds = gssapi.creds.Credentials(**new_creds_opts)
        # Then, store new credential back to original specified ccache,
        # whatever a given ccache file or the default one.
        _store = None
        # If the default ccache is used, no need to specify ccache in the
        # store parameter passed to ``creds.store``.
        if self._cleaned_options['ccache'] != DEFAULT_CCACHE:
            _store = {'ccache': store['ccache']}
        creds.store(usage='initiate', store=_store, overwrite=True)
def write_record(self, event_str):
    """Write a serialized event to file.

    Framing: 8-byte little-endian length, masked CRC of that header, the
    payload itself, then a masked CRC of the payload.
    """
    length_bytes = struct.pack('Q', len(event_str))
    header = length_bytes + struct.pack('I', masked_crc32c(length_bytes))
    footer = struct.pack('I', masked_crc32c(event_str))
    self._writer.write(header + event_str + footer)
def scales(scale=None):
    """Displays a color scale (HTML)

    Parameters:
        scale : str
            Color scale name.
            If no scale name is provided then all scales are returned
            (max number for each scale).
            If scale='all' then all scale combinations available
            will be returned.

    Example:
        scales('accent')
        scales('all')
        scales()
    """
    if not scale:
        # No scale requested: render one labelled swatch per scale name,
        # sorted alphabetically.
        blocks = []
        for name in sorted(_scales_names):
            swatch = get_scales(name)
            blocks.append(
                '<div style="display:inline-block;padding:10px;">'
                '<div>{0}</div>{1}</div>'.format(name, cl.to_html(swatch)))
        display(HTML(''.join(blocks)))
    elif scale == 'all':
        display(HTML(cl.to_html(_scales)))
    else:
        display(HTML(cl.to_html(get_scales(scale))))
def _do_anchor(self, anchor):
    """Collects preposition anchors and attachments in a dictionary.

    Once the dictionary has an entry for both the anchor and the attachment,
    they are linked.
    """
    if anchor:
        # Components look like "A1" (anchor) or "P1" (attachment id),
        # possibly combined, e.g. "A1-P2"; handle each one separately.
        for x in anchor.split("-"):
            A, P = None, None
            if x.startswith("A") and len(self.chunks) > 0:  # anchor
                A, P = x, x.replace("A", "P")
                # The most recently parsed chunk is the anchor for this id.
                self._anchors[A] = self.chunks[-1]
            if x.startswith("P") and len(self.pnp) > 0:  # attachment (PNP)
                A, P = x.replace("P", "A"), x
                # The most recently parsed PNP is the attachment.
                self._anchors[P] = self.pnp[-1]
            # Link once both halves of the pair are known, but never
            # overwrite an attachment that already has an anchor.
            if A in self._anchors and P in self._anchors and not self._anchors[P].anchor:
                pnp = self._anchors[P]
                pnp.anchor = self._anchors[A]
                pnp.anchor.attachments.append(pnp)
def description_for_number(numobj, lang, script=None, region=None):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from.  This function explicitly checks the validity of the number
    passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
            which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase ISO 15924 script code (e.g. "Hant")
    region -- The region code for a given user, as a two-letter upper-case
              CLDR region code; it will be omitted from the description if
              the number comes from this region.

    Returns a text description in the given language code, for the given
    phone number, or an empty string if no description is available.
    """
    num_type = number_type(numobj)
    if num_type == PhoneNumberType.UNKNOWN:
        # Invalid numbers get no description at all.
        return ""
    if not is_number_type_geographical(num_type, numobj.country_code):
        # Non-geographical numbers: only a country-level name is possible.
        return country_name_for_number(numobj, lang, script, region)
    return description_for_valid_number(numobj, lang, script, region)
def buildFinished(self, build, wfb):
    """This is called when the Build has finished (either success or
    failure).  Any exceptions during the build are reported with
    results=FAILURE, not with an errback.
    """
    # by the time we get here, the Build has already released the worker,
    # which will trigger a check for any now-possible build requests
    # (maybeStartBuilds)
    results = build.build_status.getResults()
    self.building.remove(build)
    if results == RETRY:
        # Put the build requests back in the queue to be attempted again.
        d = self._resubmit_buildreqs(build)
        d.addErrback(log.err, 'while resubmitting a build request')
    else:
        # Mark every request satisfied by this build as complete, stamped
        # with the reactor's current time.
        complete_at_epoch = self.master.reactor.seconds()
        complete_at = epoch2datetime(complete_at_epoch)
        brids = [br.id for br in build.requests]
        d = self.master.data.updates.completeBuildRequests(brids, results, complete_at=complete_at)
        # nothing in particular to do with this deferred, so just log it if
        # it fails..
        d.addErrback(log.err, 'while marking build requests as completed')
    if wfb.worker:
        wfb.worker.releaseLocks()
def times(A, b, offset=0):
    """Times the view of A with b in place (!).

    Returns modified A.
    Broadcasting is allowed, thus b can be scalar.
    If offset is not zero, make sure b is of right shape!

    :param ndarray A: 2 dimensional array
    :param ndarray-like b: either one dimensional or scalar
    :param int offset: same as in view.
    :rtype: view of A, which is adjusted inplace
    """
    # Delegate to the generic diagonal-ufunc helper with multiplication as
    # the operation.
    return _diag_ufunc(A, b, offset, np.multiply)
def generate_sky_catalog(image, refwcs, **kwargs):
    """Build source catalog from input image using photutils.

    The catalog returned by this function includes sources found in all
    chips of the input image with the positions translated to the coordinate
    frame defined by the reference WCS `refwcs`.  The sources will be
    - identified using photutils segmentation-based source finding code
    - ignore any input pixel which has been flagged as 'bad' in the DQ
      array, should a DQ array be found in the input HDUList.
    - classified as probable cosmic-rays (if enabled) using central_moments
      properties of each source, with these sources being removed from the
      catalog.

    Parameters
    ----------
    image : ~astropy.io.fits.HDUList
        Input image.
    refwcs : `~stwcs.wcsutils.HSTWCS`
        Definition of the reference frame WCS; built from `image` if None.
    **kwargs
        Forwarded to `generate_source_catalog` (dqname, output, threshold,
        fwhm -- see that function for details).

    Returns
    -------
    master_cat : `~astropy.table.Table`
        Source catalog for all 'valid' sources identified from all chips of
        the input image, with positions translated to the reference WCS
        coordinate frame.
    """
    # Extract source catalogs for each chip
    source_cats = generate_source_catalog(image, **kwargs)
    # Build source catalog for entire image
    master_cat = None
    numSci = countExtn(image, extname='SCI')
    # if no refwcs specified, build one now...
    if refwcs is None:
        refwcs = build_reference_wcs([image])
    for chip in range(numSci):
        chip += 1  # FITS SCI extensions are 1-indexed
        # work with sources identified from this specific chip
        seg_tab_phot = source_cats[chip]
        if seg_tab_phot is None:
            continue
        # Convert pixel coordinates from this chip to sky coordinates
        chip_wcs = wcsutil.HSTWCS(image, ext=('sci', chip))
        seg_ra, seg_dec = chip_wcs.all_pix2world(seg_tab_phot['xcentroid'], seg_tab_phot['ycentroid'], 1)
        # Convert sky positions to pixel positions in the reference WCS frame
        seg_xy_out = refwcs.all_world2pix(seg_ra, seg_dec, 1)
        seg_tab_phot['xcentroid'] = seg_xy_out[0]
        seg_tab_phot['ycentroid'] = seg_xy_out[1]
        # Accumulate per-chip tables into the single master catalog.
        if master_cat is None:
            master_cat = seg_tab_phot
        else:
            master_cat = vstack([master_cat, seg_tab_phot])
    return master_cat
def whoami(self):
    """Return a Deferred which fires with a 2-tuple of (dotted quad ip, port
    number)."""
    d = self.callRemote(WhoAmI)
    # Unwrap the response box down to just the address tuple.
    d.addCallback(lambda response: response['address'])
    return d
def ToScriptHash(data, unhex=True):
    """Get a script hash of the data.

    Args:
        data (bytes): data to hash.
        unhex (bool): (Default) True. Set to unhexlify the stream. Use when
            the bytes are not raw bytes; i.e. b'aabb'

    Returns:
        UInt160: script hash.
    """
    if unhex and len(data) > 1:
        data = binascii.unhexlify(data)
    # Hash160 returns a hex string; convert back to raw bytes for UInt160.
    digest_hex = bytes(Crypto.Hash160(data), encoding='utf-8')
    return UInt160(data=binascii.unhexlify(digest_hex))
def get_render_data(self, **kwargs):
    """Add the model name (singular and plural) to the context, then defer
    to the parent implementation."""
    kwargs.update(
        model_name=self.model_name,
        model_name_plural=self.model_name_plural,
    )
    return super(ModelCMSView, self).get_render_data(**kwargs)
def ts_stream_keys(self, table, timeout=None):
    """Stream keys from a timeseries table, returning an iterator that
    yields lists of keys."""
    req_code = riak.pb.messages.MSG_CODE_TS_LIST_KEYS_REQ
    codec = self._get_codec(req_code)
    request = codec.encode_timeseries_listkeysreq(table, timeout)
    self._send_msg(request.msg_code, request.data)
    # The stream object pulls key batches lazily off the connection.
    return PbufTsKeyStream(self, codec, self._ts_convert_timestamp)
def _start_browsing_some_sites(self):
    '''Starts browsing some sites.

    Raises:
        NoBrowsersAvailable if none available
    '''
    # acquire_multi() raises NoBrowsersAvailable if none available.
    # Claim roughly half (rounded up) of the currently idle browsers.
    browsers = self._browser_pool.acquire_multi((self._browser_pool.num_available() + 1) // 2)
    try:
        sites = self._frontier.claim_sites(len(browsers))
    except:
        # Claiming sites failed: return every acquired browser to the pool
        # before re-raising so none are leaked.
        self._browser_pool.release_all(browsers)
        raise
    for i in range(len(browsers)):
        if i < len(sites):
            # One daemon thread per (browser, site) pair.
            th = threading.Thread(target=self._brozzle_site_thread_target, args=(browsers[i], sites[i]), name="BrozzlingThread:%s" % browsers[i].chrome.port, daemon=True)
            with self._browsing_threads_lock:
                self._browsing_threads.add(th)
            th.start()
        else:
            # Fewer claimed sites than browsers: release the surplus.
            self._browser_pool.release(browsers[i])
def cues(self, rename_inhibitors=False):
    """Return the stimuli and inhibitor species of this experimental setup.

    Parameters
    ----------
    rename_inhibitors : boolean
        If True, rename inhibitors with an ending 'i' as in MIDAS files.

    Returns
    -------
    list
        Species names in order: first stimuli, followed by inhibitors.
    """
    inhibitors = self.inhibitors
    if rename_inhibitors:
        # MIDAS convention: inhibitor columns carry a trailing 'i'.
        inhibitors = ['%si' % species for species in inhibitors]
    return self.stimuli + inhibitors
def vec_angle(vec1, vec2):
    """Angle between two R-dimensional vectors.

    Angle calculated as arccos(vec1 . vec2 / (||vec1|| ||vec2||)).

    Parameters
    ----------
    vec1 : length-R |npfloat_| -- first vector
    vec2 : length-R |npfloat_| -- second vector

    Returns
    -------
    angle : |npfloat_| -- angle between the two vectors in degrees

    Raises
    ------
    ValueError
        If either argument is not one-dimensional, the lengths differ, or
        either norm is below the zero-vector tolerance.
    """
    # Imports
    import numpy as np
    from scipy import linalg as spla
    from ..const import PRM

    # Validate shapes: both arguments must be 1-D and of equal length.
    if len(vec1.shape) != 1:
        raise ValueError("'vec1' is not a vector")
    if len(vec2.shape) != 1:
        raise ValueError("'vec2' is not a vector")
    if vec1.shape[0] != vec2.shape[0]:
        raise ValueError("Vector lengths are not equal")

    # Reject near-zero vectors, whose direction is undefined.
    norm1 = spla.norm(vec1)
    norm2 = spla.norm(vec2)
    if norm1 < PRM.ZERO_VEC_TOL:
        raise ValueError("'vec1' norm is too small")
    if norm2 < PRM.ZERO_VEC_TOL:
        raise ValueError("'vec2' norm is too small")

    # Normalized dot product may drift slightly outside [-1, 1] from
    # round-off; clamp before taking the arccos.
    dotp = np.dot(vec1, vec2) / norm1 / norm2
    if dotp > 1:
        return 0.  # pragma: no cover
    if dotp < -1:
        return 180.  # pragma: no cover
    return np.degrees(np.arccos(dotp))
async def close(self) -> None:
    """Explicit exit. Closes pool. For use when keeping pool open across multiple calls."""
    LOGGER.debug('NodePool.close >>>')
    if self.handle:
        await pool.close_pool_ledger(self.handle)
        # Clear the backing attribute so the pool reads as closed.
        self._handle = None
    else:
        LOGGER.warning('Abstaining from closing pool %s: already closed', self.name)
    LOGGER.debug('NodePool.close <<<')
def ccmod_xstep(k):
    """Do the X step of the ccmod stage.

    The only parameter is the slice index `k` and there are no return
    values; all inputs and outputs are from and to global variables.
    """
    # Residual between the consensus variable Y and this slice's dual
    # variable U.
    YU = mp_D_Y - mp_D_U[k]
    # Right-hand side in the frequency domain: data term plus the scaled
    # penalty term.
    b = mp_ZSf[k] + mp_drho * sl.rfftn(YU, None, mp_cri.axisN)
    # Solve the linear system (solvedbi_sm -- presumably a Sherman-Morrison
    # style diagonal-block solver; see the sl module for details).
    Xf = sl.solvedbi_sm(mp_Zf[k], mp_drho, b, axis=mp_cri.axisM)
    # Transform back to the spatial domain and store the result globally.
    mp_D_X[k] = sl.irfftn(Xf, mp_cri.Nv, mp_cri.axisN)
def prin(*args, **kwargs):
    r"""Like ``print``, but a function. I.e. prints out all arguments as
    ``print`` would do. Specify output stream like this::

        prin('ERROR', out=sys.stderr)

    Parameters
    ----------
    *args
        Objects to print; each is converted with ``str`` and the results
        are joined with single spaces.
    **kwargs
        Only ``out`` is recognised: the target stream. ``None`` (the
        default) means ``sys.stdout``.
    """
    # BUG FIX: ``print >> stream, ...`` is Python 2 statement syntax and a
    # SyntaxError on Python 3; use the print() function with ``file=``.
    # ``file=None`` falls back to sys.stdout, matching the old behaviour.
    print(" ".join(str(arg) for arg in args), file=kwargs.get('out', None))
def ensure_specification_cols_are_in_dataframe(specification, dataframe):
    """Check whether each column in `specification` is in `dataframe`.

    Parameters
    ----------
    specification : OrderedDict.
        Keys are a proper subset of the columns in `dataframe`. Values are
        either a list or a single string, "all_diff" or "all_same". If a
        list, the elements should be single objects or lists of objects
        from the alternative ID column of the data (a unique column /
        coefficient per single object; one shared column / coefficient per
        inner list).
    dataframe : pandas DataFrame.
        Dataframe containing the data for the choice model to be estimated.

    Returns
    -------
    None.

    Raises
    ------
    TypeError
        If `specification` is not an OrderedDict or `dataframe` is not a
        pandas DataFrame.
    ValueError
        If any specification key is missing from the dataframe columns.
    """
    # Explicit raises instead of the original try/assert/except
    # AssertionError dance: asserts are stripped under `python -O`, which
    # would have silently disabled both checks.
    if not isinstance(specification, OrderedDict):
        raise TypeError("`specification` must be an OrderedDict.")
    if not isinstance(dataframe, pd.DataFrame):
        raise TypeError("`dataframe` must be a pandas DataFrame.")

    problem_cols = [col for col in specification
                    if col not in dataframe.columns]
    if problem_cols:
        msg = "The following keys in the specification are not in 'data':\n{}"
        raise ValueError(msg.format(problem_cols))
    return None
def probe_response(msg, arg):
    """Process responses from the query sent by genl_ctrl_probe_by_name().

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L203

    Fills out the missing information in the genl_family structure from the
    returned messages.

    Positional arguments:
    msg -- returned message (nl_msg class instance).
    arg -- genl_family class instance to fill out.

    Returns:
    Indicator to keep processing frames or not (NL_SKIP or NL_STOP).
    """
    attrs = {i: None for i in range(CTRL_ATTR_MAX + 1)}
    nlh = nlmsg_hdr(msg)
    family = arg
    if genlmsg_parse(nlh, 0, attrs, CTRL_ATTR_MAX, ctrl_policy):
        # Unparseable message: skip it and keep listening.
        return NL_SKIP
    if attrs[CTRL_ATTR_FAMILY_ID]:
        genl_family_set_id(family, nla_get_u16(attrs[CTRL_ATTR_FAMILY_ID]))
    if attrs[CTRL_ATTR_MCAST_GROUPS] and parse_mcast_grps(family, attrs[CTRL_ATTR_MCAST_GROUPS]) < 0:
        return NL_SKIP
    return NL_STOP
def parse_runway_config(self):
    """Read and parse runway.yml."""
    config_path = self.runway_config_path
    if not os.path.isfile(config_path):
        LOGGER.error("Runway config file was not found (looking for "
                     "%s)", config_path)
        sys.exit(1)
    with open(config_path) as data_file:
        return yaml.safe_load(data_file)
def _validate_metadata(metadata_props):
    '''Validate metadata properties and possibly show warnings or throw exceptions.

    :param metadata_props: A dictionary of metadata properties, with property names and values (see :func:`~onnxmltools.utils.metadata_props.add_metadata_props` for examples)
    '''
    # Keys that differ only by case collapse inside CaseInsensitiveDict, so
    # a length mismatch reveals duplicates.
    if len(CaseInsensitiveDict(metadata_props)) != len(metadata_props):
        raise RuntimeError('Duplicate metadata props found')
    for prop_name, prop_value in metadata_props.items():
        allowed = KNOWN_METADATA_PROPS.get(prop_name)
        if allowed and prop_value.lower() not in allowed:
            warnings.warn('Key {} has invalid value {}. Valid values are {}'.format(prop_name, prop_value, allowed))
def set_data(self, data=None, **kwargs):
    '''Read data into memory, applying all actions in queue.
    Additionally, update queue and history.'''
    if data is None:
        data = self.get_data(**kwargs)
    self._data = data
    # Every queued action has now been applied: archive it, then reset.
    self.history += self.queue
    self.queue = []
def rlmb_tiny_sv2p():
    """Tiny setting with a tiny sv2p model."""
    hparams = rlmb_ppo_tiny()
    # Swap the generative model for the tiny sv2p variant.
    overrides = {
        "generative_model": "next_frame_sv2p",
        "generative_model_params": "next_frame_sv2p_tiny",
        "grayscale": False,
    }
    for name, value in overrides.items():
        setattr(hparams, name, value)
    return hparams
def matrix_mult_opt_order(M):
    """Matrix chain multiplication optimal order.

    :param M: list of matrices
    :returns: matrices opt, arg, such that opt[i][j] is the optimal number of
              operations to compute M[i] * ... * M[j] when done in the order
              (M[i] * ... * M[k]) * (M[k+1] * ... * M[j]) for k = arg[i][j]
    :complexity: :math:`O(n^3)`
    """
    n = len(M)
    rows = [len(mat) for mat in M]
    cols = [len(mat[0]) for mat in M]
    opt = [[0] * n for _ in range(n)]
    arg = [[None] * n for _ in range(n)]
    # Dynamic programming over chain length: span = j - i grows from 1.
    for span in range(1, n):
        for i in range(n - span):
            j = i + span
            best_cost, best_k = float('inf'), None
            for k in range(i, j):
                cost = (opt[i][k] + opt[k + 1][j]
                        + rows[i] * cols[k] * cols[j])
                if cost < best_cost:
                    best_cost, best_k = cost, k
            opt[i][j] = best_cost
            arg[i][j] = best_k
    return opt, arg
def read_micromanager_metadata(fh):
    """Read MicroManager non-TIFF settings from open file and return as dict.

    The settings can be used to read image data without parsing the TIFF
    file.

    Parameters
    ----------
    fh : binary file handle
        Open, seekable file object; its position is reset before reading.

    Raises
    ------
    ValueError
        If the file does not contain valid MicroManager metadata.
    """
    fh.seek(0)
    try:
        # BUG FIX: a missing or unknown byte-order mark makes this dict
        # lookup raise KeyError, never IndexError -- so the intended
        # ValueError below was unreachable. Catch KeyError instead.
        byteorder = {b'II': '<', b'MM': '>'}[fh.read(2)]
    except KeyError:
        raise ValueError('not a MicroManager TIFF file')
    result = {}
    fh.seek(8)
    # Fixed layout of (header magic, offset) pairs for each metadata block.
    (index_header, index_offset, display_header, display_offset,
     comments_header, comments_offset, summary_header,
     summary_length) = struct.unpack(byteorder + 'IIIIIIII', fh.read(32))
    if summary_header != 2355492:
        raise ValueError('invalid MicroManager summary header')
    result['Summary'] = read_json(fh, byteorder, None, summary_length, None)
    if index_header != 54773648:
        raise ValueError('invalid MicroManager index header')
    fh.seek(index_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 3453623:
        raise ValueError('invalid MicroManager index header')
    # Each index entry is 5 unsigned ints; de-interleave into columns.
    data = struct.unpack(byteorder + 'IIIII' * count, fh.read(20 * count))
    result['IndexMap'] = {'Channel': data[::5],
                          'Slice': data[1::5],
                          'Frame': data[2::5],
                          'Position': data[3::5],
                          'Offset': data[4::5]}
    if display_header != 483765892:
        raise ValueError('invalid MicroManager display header')
    fh.seek(display_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 347834724:
        raise ValueError('invalid MicroManager display header')
    result['DisplaySettings'] = read_json(fh, byteorder, None, count, None)
    if comments_header != 99384722:
        raise ValueError('invalid MicroManager comments header')
    fh.seek(comments_offset)
    header, count = struct.unpack(byteorder + 'II', fh.read(8))
    if header != 84720485:
        raise ValueError('invalid MicroManager comments header')
    result['Comments'] = read_json(fh, byteorder, None, count, None)
    return result
def isPrefixOf(self, other):
    """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.

    Parameters
    ----------
    other: |ASN.1| object
        |ASN.1| object

    Returns
    -------
    : :class:`bool`
        :class:`True` if this |ASN.1| object is a parent (e.g. prefix) of
        the other |ASN.1| object or :class:`False` otherwise.
    """
    prefix_len = len(self)
    # A longer sequence can never be a prefix of a shorter one.
    if prefix_len > len(other):
        return False
    if self._value[:prefix_len] == other[:prefix_len]:
        return True
    return False
def _must_not_custom_query(issn):
    """Yield the per-journal-title filters that are applied as the
    "must_not" restriction of the boolean search.

    The filters are collected from the journal's custom search template,
    when such a template exists.
    """
    titles = journal_titles.load(issn).get('must_not', [])
    # Deduplicate after normalization so each filter appears once.
    cleaned_titles = {utils.cleanup_string(title) for title in titles}
    for cleaned in cleaned_titles:
        yield {"match": {"reference_source_cleaned": cleaned}}
def generate_plaintext_random(plain_vocab, distribution, train_samples, length):
    """Generates samples of text from the provided vocabulary.

    Args:
      plain_vocab: vocabulary.
      distribution: sampling distribution over the vocabulary, or None for
        uniform sampling; must match the vocabulary length when given.
      train_samples: number of samples to draw for training.
      length: length of each sample.

    Returns:
      train_indices (np.array of Integers): random integers for training.
        shape = [train_samples, length]
    """
    if distribution is not None:
        assert len(distribution) == len(plain_vocab)
    vocab_ids = range(len(plain_vocab))
    sample_shape = (train_samples, length)
    return np.random.choice(vocab_ids, sample_shape, p=distribution)
def importcmd(self, image_path, input_source):
    '''import will import (stdin) to the image

    Parameters
    ==========
    image_path: path to image to import to.
    input_source: input source or file
    '''
    from spython.utils import check_install
    check_install()
    command = ['singularity', 'image.import', image_path, input_source]
    result = self.run_command(command, sudo=False)
    self.println(result)
    return image_path
def register_forward_hook(self, hook):
    r"""Registers a forward hook on the block.

    The hook function is called immediately after :func:`forward`.
    It should not modify the input or output.

    Parameters
    ----------
    hook : callable
        The forward hook function of form `hook(block, input, output) -> None`.

    Returns
    -------
    :class:`mxnet.gluon.utils.HookHandle`
    """
    # The handle lets the caller detach the hook again later.
    hook_handle = HookHandle()
    hook_handle.attach(self._forward_hooks, hook)
    return hook_handle
def _is_surrounded(self, b):
    """Perform a wrapped LTE comparison only considering the SI bounds.

    :param b: The second operand (``self`` plays the role of ``a``)
    :return: True if a <= b, False otherwise
    """
    a = self
    # An empty interval is contained in anything.
    if a.is_empty:
        return True
    if a.is_top and b.is_top:
        return True
    elif a.is_top:
        # TOP can only be contained by TOP.
        return False
    elif b.is_top:
        return True
    # Both of a's endpoints must lie inside b...
    if b._surrounds_member(a.lower_bound) and b._surrounds_member(a.upper_bound):
        # ...and either the intervals coincide exactly, or b must not have
        # both of its own endpoints inside a as well (which, for wrapped
        # intervals, would indicate crossing rather than containment --
        # NOTE(review): inferred from the checks; confirm against the
        # strided-interval semantics of the enclosing class).
        if ((b.lower_bound == a.lower_bound and b.upper_bound == a.upper_bound) or not a._surrounds_member(b.lower_bound) or not a._surrounds_member(b.upper_bound)):
            return True
    return False
def module_function(string):
    """Load a function from a python module using a file name, function name
    specification of format:

        /path/to/x.py:function_name[:parameter]

    When the optional third component is present, the returned callable
    forwards it as a second positional argument to the named function.

    Raises
    ------
    ValueError
        If the specification has fewer than two colon-separated parts.
    argparse.ArgumentTypeError
        If the module does not define the named function.
    """
    parts = string.split(':', 2)
    if len(parts) < 2:
        raise ValueError("Illegal specification. Should be module:function[:parameter]")
    module_path, function_name = parts[:2]
    # Execute the module source in an isolated namespace. Use a context
    # manager so the file handle is closed (the original open(...).read()
    # leaked it).
    module_vars = {}
    with open(module_path) as module_file:
        source = module_file.read()
    exec(compile(source, module_path, 'exec'), module_vars)
    try:
        function = module_vars[function_name]
    except KeyError:
        raise argparse.ArgumentTypeError("{0} has no attribute '{1}'".format(module_path, function_name))
    if len(parts) == 3:
        old_function = function
        function = lambda r: old_function(r, parts[2])
    return function
def returner(ret):
    '''Return data to a influxdb data store'''
    serv = _get_serv(ret)
    # Serialize the return payload separately, then drop it from the full
    # document to avoid data duplication in the database.
    json_return = salt.utils.json.dumps(ret['return'])
    del ret['return']
    json_full_ret = salt.utils.json.dumps(ret)
    if "influxdb08" in serv.__module__:
        # Legacy request layout for InfluxDB 0.8.x clients.
        req = [{
            'name': 'returns',
            'columns': ['fun', 'id', 'jid', 'return', 'full_ret'],
            'points': [[ret['fun'], ret['id'], ret['jid'], json_return, json_full_ret]],
        }]
    else:
        # Request layout for InfluxDB 0.9+ clients.
        req = [{
            'measurement': 'returns',
            'tags': {'fun': ret['fun'], 'id': ret['id'], 'jid': ret['jid']},
            'fields': {'return': json_return, 'full_ret': json_full_ret},
        }]
    try:
        serv.write_points(req)
    except Exception as ex:
        log.critical('Failed to store return with InfluxDB returner: %s', ex)
def filter_seq(seq):
    '''Examines unreserved sequences to see if they are prone to mutation.

    This currently ignores solely-power-of-2 guides with b > 3.

    Returns ``None`` when the sequence is filtered out, otherwise the pair
    ``(n, guide)``.
    '''
    if seq.res:
        # Reserved sequences are skipped outright.
        return None
    n = nt.Factors(seq.factors)
    guide, s, t = aq.canonical_form(n)
    seq.guide = guide
    # The target_tau for the composite is at most the class minus extant prime factor count
    cls = aq.get_class(guide=guide)
    num_larges = seq.factors.count('P')
    upper_bound_tau = cls - num_larges - len(t)
    if cls < 2 or upper_bound_tau < 2:  # Cheap tests to eliminate almost all sequences
        return None
    # Next we ignore sequences whose guide is solely a power of 2 greater than 3:
    # strip the factor 2 and see if anything remains.
    v = nt.Factors({p: a for p, a in guide.items() if p != 2 and a > 0})
    if int(v) == 1 and cls > 3:
        return None
    # This condition greatly reduces fdb load, but excludes a lot of sequences
    if not aq.is_driver(guide=guide):
        return None
    return n, guide
def dict_merge(dct, merge_dct):
    """Recursive dict merge. Inspired by :meth:`dict.update()`, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.

    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    for key in merge_dct:
        # BUG FIX: collections.Mapping was removed in Python 3.10; the ABC
        # lives in collections.abc.
        if (key in dct and isinstance(dct[key], dict)
                and isinstance(merge_dct[key], collections.abc.Mapping)):
            # Both sides are mappings: merge recursively instead of
            # clobbering the nested dict.
            dict_merge(dct[key], merge_dct[key])
        else:
            dct[key] = merge_dct[key]
def terminate(self):
    """Delete all files created by this index, invalidating `self`. Use with care."""
    # Best-effort shutdown of the similarity shard; ignore any failure.
    try:
        self.id2sims.terminate()
    except Exception:
        pass
    import glob
    for fname in glob.glob(self.fname + '*'):
        try:
            os.remove(fname)
            logger.info("deleted %s" % fname)
        # BUG FIX: `except Exception, e` is Python 2 syntax and a
        # SyntaxError on Python 3.
        except Exception as e:
            logger.warning("failed to delete %s: %s" % (fname, e))
    # Drop every attribute. Iterate over a snapshot of the keys: deleting
    # while iterating a live Python 3 dict view raises RuntimeError.
    for val in list(self.__dict__):
        try:
            delattr(self, val)
        except Exception:
            pass
def create_token_review(self, body, **kwargs):
    """create a TokenReview

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_token_review(body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param V1beta1TokenReview body: (required)
    :param str dry_run: When present, indicates that modifications should not be persisted.
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes.
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1beta1TokenReview
             If the method is called asynchronously, returns the request thread.
    """
    # Always unwrap to the data payload; either way the underlying call's
    # result (thread when async, data when sync) is handed straight back.
    kwargs['_return_http_data_only'] = True
    return self.create_token_review_with_http_info(body, **kwargs)
def render_region_tools(context, feincms_object, region, request=None):
    """{% render_region_tools feincms_page "main" request %}

    Skips rendering in standalone mode (returns an empty context)."""
    if context.get('standalone', False) or not feincms_object:
        return {}
    edit = False
    if getattr(settings, 'LEONARDO_USE_PAGE_ADMIN', False):
        request = context.get('request', None)
        # Frontend editing is toggled via a cookie.
        if request.COOKIES.get('frontend_editing', False):
            edit = True
    widget_add_url = reverse_lazy(
        'widget_create',
        args=[feincms_object.id, region,
              '%s.%s' % (feincms_object._meta.app_label,
                         feincms_object.__class__.__name__)])
    return {
        'edit': edit,
        'feincms_object': feincms_object,
        'region': region,
        'region_name': get_page_region(region),
        'widget_add_url': widget_add_url,
    }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.