| signature (string, lengths 29 to 44.1k) | implementation (string, lengths 0 to 85.2k) |
|---|---|
def _authenticate(self):
    """Issue a cheap API request just to verify the stored credentials.

    :return bool: True on success.
    :raises Exception: if the API reports an error.
    """
    params = {'domain': self._domain}
    params.update(self._auth)
    reply = self._api.domain.info(params)
    self._validate_response(response=reply, message='Failed to authenticate')
    # inwx identifies domains by name rather than by numeric id, so store a
    # dummy id to satisfy code paths (and tests) that expect one.
    self.domain_id = 1
    return True
|
def auth(self, code="", refresh_token=""):
    """Authenticate the user and retrieve (or refresh) the access token.

    :param code: code provided after redirect (authorization_code only)
    :param refresh_token: refresh_token used to renew the access_token
        without repeating the authorization step
    """
    def _token_request(**kwargs):
        # Translate HTTP failures into DeviantartError, with a clearer
        # message for credential problems (HTTP 401).
        try:
            self.oauth.request_token(**kwargs)
        except HTTPError as e:
            if e.code == 401:
                raise DeviantartError("Unauthorized: Please check your credentials "
                                      "(client_id and client_secret).")
            else:
                raise DeviantartError(e)

    if refresh_token:
        _token_request(grant_type="refresh_token", refresh_token=refresh_token)
        self.refresh_token = self.oauth.refresh_token
    elif self.standard_grant_type == "authorization_code":
        _token_request(grant_type=self.standard_grant_type,
                       redirect_uri=self.redirect_uri, code=code)
        self.refresh_token = self.oauth.refresh_token
    elif self.standard_grant_type == "client_credentials":
        # Client-credentials flow has no refresh token.
        _token_request(grant_type=self.standard_grant_type)
    else:
        raise DeviantartError('Unknown grant type.')
    self.access_token = self.oauth.access_token
|
def get_data(self, create=False):
    """Return the dict stored for the currently running task.

    :param create: if True, create an empty dict for the task when none
        exists yet (and register cleanup for when the task finishes);
        default: False.
    :return: the task's dict, or None when there is no current task or
        no data was stored.
    """
    current = asyncio_current_task(loop=self.loop)
    if not current:
        return None
    key = id(current)
    if create and key not in self.data:
        self.data[key] = {}
        # Drop the stored dict automatically once the task completes.
        current.add_done_callback(self.del_data)
    return self.data.get(key)
|
def can_create_submission ( self , user = None ) :
'''Central access control for submitting things related to assignments .

Returns True when the given user may create a submission for this
assignment, False otherwise. NOTE(review): indentation was flattened in
this dump; the checks below appear to be nested inside ``if user:`` —
confirm against the original file.
'''
|
if user : # Super users , course owners and tutors should be able to test their validations
# before the submission is officially possible .
# They should also be able to submit after the deadline .
if user . is_superuser or user is self . course . owner or self . course . tutors . filter ( pk = user . pk ) . exists ( ) :
return True
if self . course not in user . profile . user_courses ( ) : # The user is not enrolled in this assignment ' s course .
logger . debug ( 'Submission not possible, user not enrolled in the course.' )
return False
# Only one active (non-withdrawn) submission per user and assignment.
if user . authored . filter ( assignment = self ) . exclude ( state = Submission . WITHDRAWN ) . count ( ) > 0 : # User already has a valid submission for this assignment .
logger . debug ( 'Submission not possible, user already has one for this assignment.' )
return False
if self . hard_deadline and self . hard_deadline < timezone . now ( ) : # Hard deadline has been reached .
logger . debug ( 'Submission not possible, hard deadline passed.' )
return False
if self . publish_at > timezone . now ( ) and not user . profile . can_see_future ( ) : # The assignment has not yet been published .
logger . debug ( 'Submission not possible, assignment has not yet been published.' )
return False
return True
|
def dimension(self):
    """Return the output dimension of the model."""
    if self.C00 is None:
        # Nothing estimated yet: fall back to the user-supplied dimension
        # if an integer was given, otherwise there is nothing to return.
        if isinstance(self.dim, int):
            warnings.warn('Returning user-input for dimension, since this model has not yet been estimated.')
            return self.dim
        raise RuntimeError('Please call set_model_params prior using this method.')
    if not self._svd_performed:
        self._diagonalize()
    return self._dimension(self._rank0, self._rankt, self.dim, self.singular_values)
|
def add(self, p, q):
    """Return the elliptic-curve sum of points ``p`` and ``q``.

    Handles the identity element, point doubling, and the degenerate case
    of two distinct points sharing an x coordinate (a vertical line).
    """
    # Identity element: O + q = q and p + O = p.
    if p.iszero():
        return q
    if q.iszero():
        return p
    # Slope of the line through (or tangent at) the points.
    if p == q:
        # Doubling a point with a vertical tangent yields the identity.
        if p.y == 0:
            return self.zero()
        slope = (3 * p.x ** 2 + self.a) / (2 * p.y)
    elif p.x == q.x:
        # Distinct points on a vertical line are inverses of each other.
        return self.zero()
    else:
        slope = (p.y - q.y) / (p.x - q.x)
    # Third intersection with the curve, mirrored over the x axis.
    rx = slope ** 2 - (p.x + q.x)
    ry = slope * (p.x - rx) - p.y
    return self.point(rx, ry)
|
def request ( self , method , path , options = None , payload = None , heartbeater = None , retry_count = 0 ) :
"""Make a request to the Service Registry API.

@param method: HTTP method ('POST', 'GET', etc.).
@type method: C{str}
@param path: Path to be appended to base URL ('/sessions', etc.).
@type path: C{str}
@param options: Options to be encoded as query parameters in the URL.
@type options: C{dict}
@param payload: Optional body.
@type payload: C{dict}
@param heartbeater: Optional heartbeater passed in when creating a session.
@type heartbeater: L{HeartBeater}"""
|
def _request ( authHeaders , options , payload , heartbeater , retry_count ) :
# Build the tenant-scoped URL from the auth headers the agent returned.
tenantId = authHeaders [ 'X-Tenant-Id' ]
requestUrl = self . baseUrl + tenantId + path
if options :
requestUrl += '?' + urlencode ( options )
# NOTE(review): the dict payload is replaced by the body producer here,
# so cbRequest receives the producer, not the raw dict — confirm intended.
payload = StringProducer ( json . dumps ( payload ) ) if payload else None
d = self . agent . request ( method = method , uri = requestUrl , headers = None , bodyProducer = payload )
d . addCallback ( self . cbRequest , method , path , options , payload , heartbeater , retry_count )
return d
# Fetch auth headers asynchronously, then issue the actual request.
d = self . agent . getAuthHeaders ( )
d . addCallback ( _request , options , payload , heartbeater , retry_count )
return d
|
def is_human(data, builds=None):
    """Check whether ``data`` refers to a human genome.

    Optionally restricted to specific build numbers; matches by genome
    build name, by a configured human alias, or (for build 37) by the
    presence of extra GL contigs.
    """
    def _contains_build37_contigs(d):
        # GL / _gl alternate contigs that appear in the hg19/GRCh37 maps
        # indicate a build-37 human reference.
        for contig in ref.file_contigs(dd.get_ref_file(d)):
            if contig.name.startswith("GL") or contig.name.find("_gl") >= 0:
                if contig.name in naming.GMAP["hg19"] or contig.name in naming.GMAP["GRCh37"]:
                    return True
        return False

    if not builds and tz.get_in(["genome_resources", "aliases", "human"], data):
        return True
    if not builds or "37" in builds:
        if any(dd.get_genome_build(data).startswith(b) for b in ("hg19", "GRCh37")):
            return True
        if _contains_build37_contigs(data):
            return True
    if not builds or "38" in builds:
        if dd.get_genome_build(data).startswith("hg38"):
            return True
    return False
|
def AddBookkeepingOperators(model):
    """Attach inspection-only bookkeeping operators to ``model``.

    These operators do not affect the training procedure: they only
    collect statistics and print them to file or to logs.
    """
    # Print dumps a blob's content; to_file=1 routes the output to a file
    # stored under root_folder/[blob name].
    for blob_name in ('accuracy', 'loss'):
        model.Print(blob_name, [], to_file=1)
    # Summarize, unlike Print, reports statistics of each parameter
    # (mean, std, min, max) — for both the parameter and its gradient.
    for param in model.params:
        model.Summarize(param, [], to_file=1)
        model.Summarize(model.param_to_grad[param], [], to_file=1)
|
def refine ( args ) :
"""% prog refine breakpoints . bed gaps . bed

Find gaps within or near each breakpoint region. For breakpoint regions
with no overlapping gaps, there are two options:
- Break in the middle of the region (default)
- Break at the closest gap ( --closest )"""
|
p = OptionParser ( refine . __doc__ )
p . add_option ( "--closest" , default = False , action = "store_true" , help = "In case of no gaps, use closest [default: %default]" )
opts , args = p . parse_args ( args )
if len ( args ) != 2 :
sys . exit ( not p . print_help ( ) )
breakpointsbed , gapsbed = args
# Count columns of the breakpoints file to know where appended gap
# columns begin after intersectBed -wao.
# NOTE(review): .next() is Python 2 only, while print(..., file=...) below
# is Python 3 style — confirm the intended interpreter version.
ncols = len ( open ( breakpointsbed ) . next ( ) . split ( ) )
logging . debug ( "File {0} contains {1} columns." . format ( breakpointsbed , ncols ) )
# Intersect breakpoints with gaps, keeping non-overlapping records (-wao).
cmd = "intersectBed -wao -a {0} -b {1}" . format ( breakpointsbed , gapsbed )
pf = "{0}.{1}" . format ( breakpointsbed . split ( "." ) [ 0 ] , gapsbed . split ( "." ) [ 0 ] )
ingapsbed = pf + ".bed"
sh ( cmd , outfile = ingapsbed )
fp = open ( ingapsbed )
data = [ x . split ( ) for x in fp ]
nogapsbed = pf + ".nogaps.bed"
largestgapsbed = pf + ".largestgaps.bed"
nogapsfw = open ( nogapsbed , "w" )
largestgapsfw = open ( largestgapsbed , "w" )
# Group intersect output by the original breakpoint record (first ncols
# fields); write regions without gaps and the largest gap per region.
for b , gaps in groupby ( data , key = lambda x : x [ : ncols ] ) :
gaps = list ( gaps )
gap = gaps [ 0 ]
if len ( gaps ) == 1 and gap [ - 1 ] == "0" :
# Zero overlap: intersectBed reports "." fields for the absent gap.
assert gap [ - 3 ] == "."
print ( "\t" . join ( b ) , file = nogapsfw )
continue
gaps = [ ( int ( x [ - 1 ] ) , x ) for x in gaps ]
maxgap = max ( gaps ) [ 1 ]
print ( "\t" . join ( maxgap ) , file = largestgapsfw )
nogapsfw . close ( )
largestgapsfw . close ( )
beds = [ largestgapsbed ]
toclean = [ nogapsbed , largestgapsbed ]
if opts . closest :
# Use the nearest gap for regions that contained none.
closestgapsbed = pf + ".closestgaps.bed"
cmd = "closestBed -a {0} -b {1} -d" . format ( nogapsbed , gapsbed )
sh ( cmd , outfile = closestgapsbed )
beds += [ closestgapsbed ]
toclean += [ closestgapsbed ]
else :
# Otherwise collapse each gapless region to its midpoint.
pointbed = pf + ".point.bed"
pbed = Bed ( )
bed = Bed ( nogapsbed )
for b in bed :
pos = ( b . start + b . end ) / 2
b . start , b . end = pos , pos
pbed . append ( b )
pbed . print_to_file ( pointbed )
beds += [ pointbed ]
toclean += [ pointbed ]
refinedbed = pf + ".refined.bed"
FileMerger ( beds , outfile = refinedbed ) . merge ( )
# Clean - up
FileShredder ( toclean )
return refinedbed
|
def _getFeedContent(self, url, excludeRead=False, continuation=None, loadLimit=20, since=None, until=None):
    """Fetch a list of items (from a feed, a category, or a SPECIAL_ITEMS_URL).

    Returns a dict with:
      - id: (str) the feed's id
      - continuation: (str) token to be used to fetch more items
      - items: list of dicts with update timestamp, author (username),
        title (page title), id, content (dict with content and direction)
        and categories (including states or ones provided by the feed
        owner).
    """
    query = {}
    if excludeRead:
        # Ask the API to skip items already marked as read.
        query['xt'] = 'user/-/state/com.google/read'
    if continuation:
        query['c'] = continuation
    query['n'] = loadLimit
    if since:
        query['ot'] = since
    if until:
        query['nt'] = until
    raw = self.httpGet(url, query)
    return json.loads(raw, strict=False)
|
def add_to_environment ( self , environment ) :
"""Add the router to the given environment.

Registers this router in the environment's router table and installs
the C callback shims under the router's name and priority.
"""
|
# Keep handles on the raw environment and on this object (as FFI
# userdata) so the C callbacks can locate the Python router again.
self . _env = environment . _env
self . _userdata = ffi . new_handle ( self )
ENVIRONMENT_DATA [ self . _env ] . routers [ self . name ] = self
lib . EnvAddRouterWithContext ( self . _env , self . _name . encode ( ) , self . _priority , lib . query_function , lib . print_function , lib . getc_function , lib . ungetc_function , lib . exit_function , self . _userdata )
|
def ppo_tiny_world_model():
    """Atari parameters with the world model used as the policy network."""
    hparams = ppo_original_params()
    hparams.policy_network = "next_frame_basic_deterministic"
    existing_keys = hparams.values().keys()
    video_hparams = basic_deterministic_params.next_frame_tiny()
    # Merge the tiny world-model hyperparameters: override keys that are
    # already present, add the ones that are not.
    for name, value in six.iteritems(video_hparams.values()):
        if name in existing_keys:
            hparams.set_hparam(name, value)
        else:
            hparams.add_hparam(name, value)
    hparams.weight_decay = 0
    return hparams
|
def inferObjects ( self , bodyPlacement , maxTouches = 2 ) :
"""Touch each object with multiple sensors, up to ``maxTouches`` times.

: param bodyPlacement : world location of the body; forwarded to monitors
  and used to derive egocentric sensor locations.
: returns : dict mapping the number of touches required to the number of
  objects that took that many touches to be uniquely inferred. The ' None '
  key is reserved for objects not recognized after ` maxTouches ` touches"""
|
for monitor in self . monitors . itervalues ( ) :
monitor . afterBodyWorldLocationChanged ( bodyPlacement )
numTouchesRequired = collections . defaultdict ( int )
for objectName , objectFeatures in self . objects . iteritems ( ) :
self . reset ( )
objectPlacement = self . objectPlacements [ objectName ]
featureIndexByColumnIterator = ( greedySensorPositions ( self . numCorticalColumns , len ( objectFeatures ) ) )
for touch in xrange ( maxTouches ) : # Choose where to place each sensor .
featureIndexByColumn = featureIndexByColumnIterator . next ( )
sensedFeatures = [ objectFeatures [ i ] for i in featureIndexByColumn ]
featureSDRByColumn = [ self . features [ ( iCol , feature [ "name" ] ) ] for iCol , feature in enumerate ( sensedFeatures ) ]
# World location of each sensor: the center of the sensed feature,
# offset by the object's placement.
worldLocationByColumn = np . array ( [ [ objectPlacement [ 0 ] + feature [ "top" ] + feature [ "height" ] / 2 , objectPlacement [ 1 ] + feature [ "left" ] + feature [ "width" ] / 2 ] for feature in sensedFeatures ] )
for monitor in self . monitors . itervalues ( ) :
monitor . afterSensorWorldLocationChanged ( worldLocationByColumn )
egocentricLocationByColumn = worldLocationByColumn - bodyPlacement
# Repeat compute until the network settles (activity stops changing)
# or maxSettlingTime is reached.
prevCellActivity = None
for t in xrange ( self . maxSettlingTime ) :
for monitor in self . monitors . itervalues ( ) :
monitor . beforeCompute ( egocentricLocationByColumn , featureSDRByColumn , isRepeat = ( t > 0 ) )
self . compute ( egocentricLocationByColumn , featureSDRByColumn , learn = False )
cellActivity = ( tuple ( c . getAllCellActivity ( ) for c in self . corticalColumns ) , tuple ( set ( module . activeCells ) for module in self . bodyToSpecificObjectModules ) )
if cellActivity == prevCellActivity : # It settled . Cancel logging this timestep .
for monitor in self . monitors . itervalues ( ) :
monitor . clearUnflushedData ( )
break
else :
prevCellActivity = cellActivity
for monitor in self . monitors . itervalues ( ) :
monitor . flush ( )
# Check if the object is narrowed down
if self . isObjectClassified ( objectName ) :
numTouchesRequired [ touch + 1 ] += 1
break
# NOTE(review): flattened indentation — this else appears to be the
# for/else of the touch loop (runs only when no break happened, i.e.
# the object was never classified); confirm against the original file.
else :
numTouchesRequired [ None ] += 1
return numTouchesRequired
|
def update_shared_file(self, sharekey=None, title=None, description=None):
    """Update the editable details (title and description) of a SharedFile.

    Args:
        sharekey (str): Sharekey of the SharedFile to update.
        title (Optional[str]): New title of the SharedFile.
        description (Optional[str]): New description of the SharedFile.

    Returns:
        SharedFile on success, 404 on Sharekey not found, 403 on
        unauthorized.

    Raises:
        Exception: if no sharekey was given, or neither a title nor a
            description was given.
    """
    if not sharekey:
        # Bug fix: the two adjacent string literals previously concatenated
        # without a separating space ("...sharedfileyou wish...").
        raise Exception("You must specify a sharekey for the sharedfile "
                        "you wish to update.")
    if not (title or description):
        raise Exception("You must specify a title or description.")
    post_data = {}
    if title:
        post_data['title'] = title
    if description:
        post_data['description'] = description
    endpoint = '/api/sharedfile/{0}'.format(sharekey)
    data = self._make_request('POST', endpoint=endpoint, data=post_data)
    return SharedFile.NewFromJSON(data)
|
def set_var(var, value):
    '''Set a variable in the make.conf

    Return a dict containing the new value for variable::

        {'<variable>': {'old': '<old-value>',
                        'new': '<new-value>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' makeconf.set_var 'LINGUAS' 'en'
    '''
    makeconf = _get_makeconf()
    old_value = get_var(var)
    if old_value is None:
        # Variable absent from the file: append it.
        _add_var(var, value)
    else:
        # Variable already present: rewrite its assignment in place.
        __salt__['file.sed'](makeconf, '^{0}=.*'.format(var), '{0}="{1}"'.format(var, value))
    return {var: {'old': old_value, 'new': get_var(var)}}
|
def load_metrics(event_dir, epoch):
    """Load metrics for this epoch if they have already been written.

    Reads every event file in ``event_dir`` in full — they are small,
    holding only per-epoch metrics.

    Args:
        event_dir: directory containing TF event files.
        epoch: step number whose metrics should be collected.

    Returns:
        dict mapping summary tag to its scalar value.
    """
    metrics = {}
    for filename in tf.gfile.ListDirectory(event_dir):
        full_path = os.path.join(event_dir, filename)
        for event in tf.train.summary_iterator(full_path):
            # Keep only summaries recorded at the requested epoch.
            if event.step == epoch and event.HasField("summary"):
                first_value = event.summary.value[0]
                metrics[first_value.tag] = first_value.simple_value
    return metrics
|
def fill_n(self, values, weights=None, dropna: bool = True, columns: bool = False):
    """Add multiple values to the histogram at once.

    Parameters
    ----------
    values : array_like
        Values to add. Can be an array of shape (count, ndim), an array
        of shape (ndim, count) [use columns=True], or something
        convertible to either.
    weights : array_like
        Optional weights for the values.
    dropna : bool
        Whether to remove NaN values. If False and such a value is met,
        an exception is thrown.
    columns : bool
        Signal that the data are transposed (in columns, instead of
        rows); this also allows passing a list of arrays in ``values``.
    """
    values = np.asarray(values)
    if values.ndim != 2:
        raise RuntimeError("Expecting 2D array of values.")
    if columns:
        values = values.T
    if values.shape[1] != self.ndim:
        raise RuntimeError("Expecting array with {0} columns".format(self.ndim))
    if dropna:
        # Drop every row that contains at least one NaN.
        values = values[~np.isnan(values).any(axis=1)]
    if weights is not None:
        weights = np.asarray(weights)
        # TODO: Check for weights size?
        self._coerce_dtype(weights.dtype)
    for axis, binning in enumerate(self._binnings):
        if binning.is_adaptive():
            # Adaptive binnings may grow; remap existing data accordingly.
            bin_map = binning.force_bin_existence(values[:, axis])
            # TODO: Add to some test
            self._reshape_data(binning.bin_count, bin_map, axis)
    frequencies, errors2, missed = calculate_frequencies(values, self.ndim, self._binnings, weights=weights)
    self._frequencies += frequencies
    self._errors2 += errors2
    self._missed[0] += missed
|
def _split_regions(chrom, start, end):
    """Split regions longer than ~500kb into 100kb windows."""
    window_size = 1e5
    # Regions shorter than five windows are not worth splitting.
    if end - start < window_size * 5:
        return [(chrom, start, end)]
    region = pybedtools.BedTool("%s\t%s\t%s" % (chrom, start, end), from_string=True)
    return [(r.chrom, r.start, r.end)
            for r in pybedtools.BedTool().window_maker(w=window_size, b=region)]
|
def pdfdump ( self , filename = None , ** kargs ) :
"""pdfdump ( filename = None , layer _ shift = 0 , rebuild = 1)

Creates a PDF file describing a packet. If filename is not provided, a
temporary file is created and the configured PDF reader is launched on it.

: param filename : the target file's name (optional)"""
|
from scapy . config import conf
from scapy . utils import get_temp_file , ContextManagerSubprocess
canvas = self . canvas_dump ( ** kargs )
if filename is None :
# No filename given: write to a temp file and open it in a viewer.
fname = get_temp_file ( autoext = kargs . get ( "suffix" , ".pdf" ) )
canvas . writePDFfile ( fname )
if WINDOWS and conf . prog . pdfreader is None :
# On Windows without a configured reader, use the shell association.
os . startfile ( fname )
else :
with ContextManagerSubprocess ( "pdfdump()" , conf . prog . pdfreader ) :
subprocess . Popen ( [ conf . prog . pdfreader , fname ] )
else :
canvas . writePDFfile ( filename )
# Emit a blank line after the dump for readable interactive output.
print ( )
|
def run_encoder ( self , param_dict , encoder_dict ) :
"""Run the encoders on a supplied param_dict and build the design matrix.

: param param_dict : mapping of parameter name to its values.
: param encoder_dict : mapping of parameter name to its encoder callable;
  may also contain the special keys 'twoway', 'threeway' and
  'trimmed_columns'.
: return : tuple ( X , used_columns ) where X is the final design matrix
  (with an intercept column first) and used_columns the matching list of
  column names."""
|
X_dict = { }
Xcol_dict = { }
# put each column of X in Xbycol _ dict
Xbycol_dict = { }
# First pass: encode each ordinary parameter (special keys are skipped).
for key in encoder_dict :
if ( key != 'twoway' ) and ( key != 'threeway' ) and ( key != 'trimmed_columns' ) :
encoder = encoder_dict [ key ]
param_values = param_dict [ key ]
Xsub , names = encoder ( key , param_values )
X_dict [ key ] = Xsub
Xcol_dict [ key ] = names
for i in np . arange ( 0 , len ( names ) ) :
Xbycol_dict [ names [ i ] ] = Xsub [ : , i ]
# now do interactions
inter_list = self . _inter_list
for interaction in inter_list :
if 'twoway' in encoder_dict . keys ( ) :
encoder = encoder_dict [ 'twoway' ]
param_name1 = interaction [ 0 ]
param_name2 = interaction [ 1 ]
col_names1 = Xcol_dict [ param_name1 ]
col_names2 = Xcol_dict [ param_name2 ]
X_int , names = encoder ( param_name1 , param_name2 , col_names1 , col_names2 , X_dict )
# put columns into Xbycol _ dict
for i in np . arange ( 0 , len ( names ) ) :
Xbycol_dict [ names [ i ] ] = X_int [ : , i ]
if 'threeway' in encoder_dict . keys ( ) :
encoder = encoder_dict [ 'threeway' ]
param_name1 = interaction [ 0 ]
param_name2 = interaction [ 1 ]
param_name3 = interaction [ 2 ]
col_names1 = Xcol_dict [ param_name1 ]
col_names2 = Xcol_dict [ param_name2 ]
col_names3 = Xcol_dict [ param_name3 ]
X_int , names = encoder ( param_name1 , param_name2 , param_name3 , col_names1 , col_names2 , col_names3 , X_dict )
# put columns into Xbycol _ dict
for i in np . arange ( 0 , len ( names ) ) :
Xbycol_dict [ names [ i ] ] = X_int [ : , i ]
# remove columns that were trimmed ( if any )
trimmed_columns = encoder_dict [ 'trimmed_columns' ]
full_columns = Xbycol_dict . keys ( )
used_columns = [ x for x in full_columns if x not in trimmed_columns ]
# make design matrix array
X = [ ]
for name in used_columns :
X . append ( Xbycol_dict [ name ] )
# always add intercept column last
X . insert ( 0 , np . ones ( np . shape ( X [ 0 ] ) ) )
used_columns . insert ( 0 , 'Intercept' )
# final design matrix
X = np . vstack ( X ) . T
return X , used_columns
|
def kv_format_dict(d, keys=None, separator=DEFAULT_SEPARATOR):
    """Format the given dictionary ``d`` as key-value pairs.

    For more details see :func:`kv_format`.

    :param collections.Mapping d:
        Dictionary containing values to format.
    :param collections.Iterable keys:
        List of keys to extract from the dict.
    :param str separator:
        Value placed between two pairs.
    :return:
        Key-Value formatted content generated from ``d``.
    :rtype:
        :data:`six.text_type <six:six.text_type>`
    """
    pairs = dump_dict(d, keys)
    return _format_pairs(pairs, separator=separator)
|
def _get_attr_list(self, attr):
    """Return the user's attribute as a list of decoded strings.

    :param attr: attribute name to look up in ``self._attrs``.
    :return: list of unicode strings; empty when the attribute is missing
        or empty. Single values are wrapped in a one-element list.
    """
    value = self._attrs.get(attr)
    if not value:
        return []
    # Attributes may be stored as one bytestring or a list of bytestrings;
    # normalize to a list before decoding. (isinstance replaces the
    # non-idiomatic `type(a) is list` check.)
    if not isinstance(value, list):
        value = [value]
    return [item.decode('utf-8', 'ignore') for item in value]
|
def find_old_vidyo_rooms ( max_room_event_age ) :
"""Finds all Vidyo rooms that are:
- linked to no events
- linked only to events whose start date precedes today - max_room_event_age days

: param max_room_event_age : age threshold in days.
: return : list of matching, non-deleted VCRoom records."""
|
# Subquery: ids of vidyo rooms attached to at least one event that ended
# within the last max_room_event_age days.
recently_used = ( db . session . query ( VCRoom . id ) . filter ( VCRoom . type == 'vidyo' , Event . end_dt > ( now_utc ( ) - timedelta ( days = max_room_event_age ) ) ) . join ( VCRoom . events ) . join ( VCRoomEventAssociation . event ) . group_by ( VCRoom . id ) )
# non - deleted rooms with no recent associations
return VCRoom . find_all ( VCRoom . status != VCRoomStatus . deleted , ~ VCRoom . id . in_ ( recently_used ) )
|
def vendorize(vendor_requirements):
    """Main entry point for vendorizing requirements.

    Expects a list of tuples containing the name of the library and the
    version, optionally followed by a command to run. For example, a
    library ``foo`` with version ``0.0.1`` would look like::

        vendor_requirements = [
            ('foo', '0.0.1'),
        ]

    :raises ValueError: if an entry does not have 2 or 3 items.
    """
    for library in vendor_requirements:
        if len(library) == 2:
            name, version = library
            cmd = None
        elif len(library) == 3:
            # A possible cmd we need to run after vendorizing.
            name, version, cmd = library
        else:
            # Bug fix: malformed entries previously fell through and called
            # vendor_library with stale or undefined variables (NameError).
            raise ValueError(
                "Expected (name, version) or (name, version, cmd), got: {!r}".format(library))
        vendor_library(name, version, cmd)
|
def validate_trail_settings ( self , ct , aws_region , trail ) :
"""Validates logging, SNS and S3 settings for the global trail.

Has the capability to:
- start logging for the trail
- create SNS topics & queues
- configure or modify a S3 bucket for logging

: param ct : CloudTrail client used to query the trail's status.
: param aws_region : region the trail lives in.
: param trail : trail description dict (as returned by the API)."""
|
self . log . debug ( 'Validating trail {}/{}/{}' . format ( self . account . account_name , aws_region , trail [ 'Name' ] ) )
status = ct . get_trail_status ( Name = trail [ 'Name' ] )
# Re-enable logging if it was turned off.
if not status [ 'IsLogging' ] :
self . log . warning ( 'Logging is disabled for {}/{}/{}' . format ( self . account . account_name , aws_region , trail [ 'Name' ] ) )
self . start_logging ( aws_region , trail [ 'Name' ] )
# Ensure SNS notifications are configured and subscribed.
if 'SnsTopicName' not in trail or not trail [ 'SnsTopicName' ] :
self . log . warning ( 'SNS Notifications not enabled for {}/{}/{}' . format ( self . account . account_name , aws_region , trail [ 'Name' ] ) )
self . create_sns_topic ( aws_region )
self . enable_sns_notification ( aws_region , trail [ 'Name' ] )
if not self . validate_sns_topic_subscription ( aws_region ) :
self . log . warning ( 'SNS Notification configured but not subscribed for {}/{}/{}' . format ( self . account . account_name , aws_region , trail [ 'Name' ] ) )
self . subscribe_sns_topic_to_sqs ( aws_region )
# Ensure the trail logs to the expected bucket and key prefix.
if trail [ 'S3BucketName' ] != self . bucket_name :
self . log . warning ( 'CloudTrail is logging to an incorrect bucket for {}/{}/{}' . format ( self . account . account_name , trail [ 'S3BucketName' ] , trail [ 'Name' ] ) )
self . set_s3_bucket ( aws_region , trail [ 'Name' ] , self . bucket_name )
if not trail . get ( 'S3KeyPrefix' ) or trail [ 'S3KeyPrefix' ] != self . account . account_name :
self . log . warning ( 'Missing or incorrect S3KeyPrefix for {}/{}/{}' . format ( self . account . account_name , aws_region , trail [ 'Name' ] ) )
self . set_s3_prefix ( aws_region , trail [ 'Name' ] )
|
def enum_to_yaml ( cls : Type [ T_EnumToYAML ] , representer : Representer , data : T_EnumToYAML ) -> ruamel . yaml . nodes . ScalarNode :
    """Encode an enumeration value as a YAML scalar node.

    Mixin classmethod for writing enum values to YAML; see the module
    docstring for how to attach it to an enum. The node stores whatever
    ``str(data)`` yields — usually the unique name of the enumeration
    value, in which case the corresponding ``EnumFromYAML`` mixin can
    recreate the value. If the name isn't used, more care may be needed,
    e.g. a dedicated ``from_yaml`` for that particular enumeration.

    Note:
        The name of the enumeration value is stored as a scalar node.

    Args:
        representer: Representation from YAML.
        data: Enumeration value to be encoded.

    Returns:
        Scalar representation of the name of the enumeration value.
    """
    yaml_tag = f"!{cls.__name__}"
    return representer.represent_scalar(yaml_tag, f"{str(data)}")
|
def get_default_ref(repo):
    """Return a ``github.GitRef`` object for the HEAD of the default branch.

    Parameters
    ----------
    repo : github.Repository.Repository
        repo to get the default branch head ref from

    Returns
    -------
    head : :class:`github.GitRef` instance

    Raises
    ------
    github.RateLimitExceededException
    codekit.pygithub.CaughtRepositoryError
    """
    assert isinstance(repo, github.Repository.Repository), type(repo)
    # XXX this probably should be resolved via repos.yaml
    ref_name = "heads/{ref}".format(ref=repo.default_branch)
    # If accessing the default branch fails, something is seriously wrong...
    try:
        return repo.get_git_ref(ref_name)
    except github.RateLimitExceededException:
        # Let rate-limit errors propagate untouched.
        raise
    except github.GithubException as e:
        msg = "error getting ref: {ref}".format(ref=ref_name)
        raise CaughtRepositoryError(repo, e, msg) from None
|
def __collect_trace_data(self, request, response, error, latency):
    """Collect tracing data for one request/response cycle.

    :param request: The Flask request.
    :param response: The Flask response (may be None).
    :param error: The error that occurred, if any.
    :param latency: Timer whose ``elapsed`` holds the processing time.
    :return: The tracing data as an OrderedDict.
    """
    data = OrderedDict()
    data['latency'] = latency.elapsed
    data['request_method'] = request.environ['REQUEST_METHOD']
    data['request_url'] = request.url
    data['request_headers'] = request.headers
    request_body = request.get_data(as_text=True)
    if request_body:
        # Only record a body when one was actually sent.
        data['request_body'] = request_body
    if response:
        data['response_status'] = response.status_code
    if error:
        data['error'] = str(error)
    return data
|
def comparable(self):
    """str: comparable representation of the path specification."""
    parts = [
        'table name: {0:s}'.format(self.table_name),
        'column name: {0:s}'.format(self.column_name),
    ]
    if self.row_condition is not None:
        # Render the condition tuple as a space-separated string.
        condition = ' '.join(['{0!s}'.format(value) for value in self.row_condition])
        parts.append('row condition: "{0:s}"'.format(condition))
    if self.row_index is not None:
        parts.append('row index: {0:d}'.format(self.row_index))
    return self._GetComparable(sub_comparable_string=', '.join(parts))
|
def get_body(self):
    '''Get the response Body.

    :returns Body: A Body object containing the response.
    '''
    # Dispatch lazily on first access and cache the constructed body.
    if self._body is None:
        response = self._dispatcher._dispatch(self.request)
        self._body = self._create_body(response)
    return self._body
|
def gen_weights(self, f_target):
    """Generate basis-function weights matching a target forcing term.

    Fits the desired forcing-term trajectory with one weight per basis
    function via weighted linear regression.

    f_target np.array: the desired forcing term trajectory
    """
    # Roll out the canonical system and evaluate the basis functions on it.
    x_track = self.cs.rollout()
    psi_track = self.gen_psi(x_track)
    self.w = np.zeros((self.dmps, self.bfs))
    for d in range(self.dmps):
        # Spatial scaling term (self.goal[d] - self.y0[d]) intentionally
        # left at 1.
        k = 1.
        for b in range(self.bfs):
            numer = np.sum(x_track * psi_track[:, b] * f_target[:, d])
            denom = np.sum(x_track ** 2 * psi_track[:, b])
            self.w[d, b] = numer / (k * denom)
|
def first(self, predicate=None):
    '''The first element in a sequence (optionally satisfying a predicate).

    If the predicate is omitted or is None this query returns the first
    element in the sequence; otherwise, it returns the first element in
    the sequence for which the predicate evaluates to True. Exceptions are
    raised if there is no such element.

    Note: This method uses immediate execution.

    Args:
        predicate: An optional unary predicate function, the only argument
            to which is the element. The return value should be True for
            matching elements, otherwise False. If the predicate is
            omitted or None the first element of the source sequence will
            be returned.

    Returns:
        The first element of the sequence if predicate is None, otherwise
        the first element for which the predicate returns True.

    Raises:
        ValueError: If the Queryable is closed.
        ValueError: If the source sequence is empty.
        ValueError: If there are no elements matching the predicate.
        TypeError: If the predicate is not callable.
    '''
    if self.closed():
        raise ValueError("Attempt to call first() on a closed Queryable.")
    if predicate is None:
        return self._first()
    return self._first_predicate(predicate)
|
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
    """Run the reducer on the hadoop node.

    Reads newline-terminated records from ``stdin``, reduces them, and
    writes the results to ``stdout``.
    """
    self.init_hadoop()
    self.init_reducer()
    # Strip the trailing newline from each incoming line before parsing.
    stripped_lines = (line[:-1] for line in stdin)
    outputs = self._reduce_input(self.internal_reader(stripped_lines), self.reducer, self.final_reducer)
    self.writer(outputs, stdout)
|
def pad2d ( data , padding ) :
    """Pad a 2D array with zeros in all directions.

    Parameters
    ----------
    data : np.ndarray
        Input data array (at least 2D).
    padding : int, tuple, list or np.ndarray
        Amount of padding in the x and y directions, respectively; a single
        integer pads both axes equally.

    Returns
    -------
    np.ndarray
        Zero-padded copy of the input data.

    Raises
    ------
    ValueError
        If ``padding`` is not an integer or a tuple/list/ndarray of integers.

    Notes
    -----
    Adjustment to ``numpy.pad()``.

    Examples
    --------
    >>> from modopt.base.np_adjust import pad2d
    >>> x = np.arange(9).reshape((3, 3))
    >>> pad2d(x, (1, 1))
    array([[0, 0, 0, 0, 0],
           [0, 0, 1, 2, 0],
           [0, 3, 4, 5, 0],
           [0, 6, 7, 8, 0],
           [0, 0, 0, 0, 0]])
    """
    data = np . array ( data )
    if isinstance ( padding , int ) :
        padding = np . array ( [ padding ] )
    elif isinstance ( padding , ( tuple , list ) ) :
        padding = np . array ( padding )
    elif not isinstance ( padding , np . ndarray ) :
        # Fixed typo in the user-facing error message ("itegers" -> "integers")
        # and collapsed the dead `pass` branch.
        raise ValueError ( 'Padding must be an integer or a tuple (or list, ' 'np.ndarray) of integers' )
    # A scalar padding value applies to both axes.
    if padding . size == 1 :
        padding = np . repeat ( padding , 2 )
    return np . pad ( data , ( ( padding [ 0 ] , padding [ 0 ] ) , ( padding [ 1 ] , padding [ 1 ] ) ) , 'constant' )
|
def friendly_format ( self ) :
    """Serialize to a format more suitable for displaying to end users."""
    if self . description is None :
        # Fall back to the raw error/detail codes when no human-readable
        # description is available.
        msg = 'errorCode: {} / detailCode: {}' . format ( self . errorCode , self . detailCode )
    else :
        msg = self . description
    return self . _fmt ( self . name , msg )
|
def parse ( self , sentence ) :
    """Parse a raw tagged sentence into a ConllSentence.

    Parameters
    ----------
    sentence : list
        a list of (word, tag) tuples

    Returns
    -------
    ConllSentence
        ConllSentence object with predicted heads and relations
    """
    length = len ( sentence ) + 1
    word_ids = np . zeros ( ( length , 1 ) , np . int32 )
    tag_ids = np . zeros ( ( length , 1 ) , np . int32 )
    # Slot 0 is reserved for the artificial ROOT token.
    word_ids [ 0 , 0 ] = ParserVocabulary . ROOT
    tag_ids [ 0 , 0 ] = ParserVocabulary . ROOT
    vocab = self . _vocab
    for idx , ( word , tag ) in enumerate ( sentence ) :
        word_ids [ idx + 1 , 0 ] = vocab . word2id ( word . lower ( ) )
        tag_ids [ idx + 1 , 0 ] = vocab . tag2id ( tag )
    with mx . Context ( mxnet_prefer_gpu ( ) ) :
        outputs = self . _parser . forward ( word_ids , tag_ids )
    arcs , rels = outputs [ 0 ] [ 0 ] , outputs [ 0 ] [ 1 ]
    conll_words = [ ]
    for head , rel , ( word , tag ) in zip ( arcs , rels , sentence ) :
        conll_words . append ( ConllWord ( id = len ( conll_words ) + 1 , form = word , pos = tag , head = head , relation = vocab . id2rel ( rel ) ) )
    return ConllSentence ( conll_words )
|
def _get_attachments ( self , id ) :
"""Retrieve a list of attachments associated with this Xero object ."""
|
uri = '/' . join ( [ self . base_url , self . name , id , 'Attachments' ] ) + '/'
return uri , { } , 'get' , None , None , False
|
def get_images ( self , limit = None ) :
    """Return all of the images associated with the user.

    :param limit: optional cap on the number of images fetched.
    """
    # "{{}}" renders as a literal "{}" placeholder for the paginator.
    endpoint = "/3/account/{0}/images/{{}}" . format ( self . name )
    url = self . _imgur . _base_url + endpoint
    response = self . _imgur . _send_request ( url , limit = limit )
    return [ Image ( item , self . _imgur ) for item in response ]
|
def render_issue ( self , description = '' , traceback = '' ) :
    """Render a Github issue body from a description and an optional traceback.

    :param description: user-supplied description of the problem; when empty
        a template header prompting for reproduction steps is used instead.
    :param traceback: formatted traceback text; when non-empty it is embedded
        in a fenced ``python-traceback`` code block.
    :return: the full issue body as a markdown string.
    """
    # Get component versions
    versions = get_versions ( )
    # Get git revision for development version
    revision = ''
    if versions [ 'revision' ] :
        revision = versions [ 'revision' ]
    # Make a description header in case no description is supplied
    if not description :
        description = "### What steps reproduce the problem?"
    # Make error section from traceback and add appropriate reminder header
    if traceback :
        error_section = ( "### Traceback\n" "```python-traceback\n" "{}\n" "```" . format ( traceback ) )
    else :
        error_section = ''
    # The template is a markdown document; keep it left-aligned so the
    # rendered issue has no stray indentation.
    issue_template = """\
## Description
{description}
{error_section}
## Versions
* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}
### Dependencies
```
{dependencies}
```
""" . format ( description = description , error_section = error_section , spyder_version = versions [ 'spyder' ] , commit = revision , python_version = versions [ 'python' ] , qt_version = versions [ 'qt' ] , qt_api_name = versions [ 'qt_api' ] , qt_api_version = versions [ 'qt_api_ver' ] , os_name = versions [ 'system' ] , os_version = versions [ 'release' ] , dependencies = dependencies . status ( ) )
    return issue_template
|
def retry ( ExceptionToCheck , tries = 3 , delay = 1 , backoff = 1 ) :
    """Retry calling the decorated function, sleeping between attempts.

    http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry

    :param ExceptionToCheck: the exception to check; may be a tuple of
        exceptions to check
    :type ExceptionToCheck: Exception or tuple
    :param tries: number of times to try (not retry) before giving up
    :type tries: int
    :param delay: initial delay between retries in seconds
    :type delay: int
    :param backoff: backoff multiplier, e.g. a value of 2 will double the
        delay each retry
    :type backoff: int
    """
    def deco_retry ( func ) :
        @ functools . wraps ( func )
        def wrapper ( * args , ** kwargs ) :
            remaining , wait = tries , delay
            # All attempts but the last swallow the exception and sleep.
            while remaining > 1 :
                try :
                    return func ( * args , ** kwargs )
                except ExceptionToCheck :
                    time . sleep ( wait )
                    remaining -= 1
                    wait *= backoff
            # Final attempt: let any exception propagate to the caller.
            return func ( * args , ** kwargs )
        return wrapper
    return deco_retry
|
def add_ip_address ( list_name , item_name ) :
    '''Add an IP address to an IP address list.

    list_name(str): The name of the specific policy IP address list to
        append to.
    item_name(str): The IP address to append to the list.

    CLI Example:

    .. code-block:: bash

        salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24
    '''
    request = {
        "jsonrpc" : "2.0" ,
        "id" : "ID0" ,
        "method" : "add_policy_ip_addresses" ,
        "params" : [ list_name , { "item_name" : item_name } ] ,
    }
    result = __proxy__ [ 'bluecoat_sslv.call' ] ( request , True )
    return _validate_change_result ( result )
|
def _get_application ( self , subdomain ) :
"""Return a WSGI application for subdomain . The subdomain is
passed to the create _ application constructor as a keyword argument .
: param subdomain : Subdomain to get or create an application with"""
|
with self . lock :
app = self . instances . get ( subdomain )
if app is None :
app = self . create_application ( subdomain = subdomain )
self . instances [ subdomain ] = app
return app
|
def add ( self , * args , ** kwargs ) :
    """Add Cookie objects by their names, or create new ones under
    specified names.

    Unnamed arguments are treated as existing cookies and are stored under
    the value of their ``.name`` attribute. Keyword arguments are
    interpreted as (cookie name, UNENCODED value) pairs from which new
    cookies are built. Every cookie is appended to ``all_cookies``; only
    the first cookie per name is reachable through the mapping interface
    (the rest via get_all/all_cookies).
    """
    for existing in args :
        self . all_cookies . append ( existing )
        if existing . name not in self :
            self [ existing . name ] = existing
    for cookie_name , raw_value in kwargs . items ( ) :
        made = self . cookie_class ( cookie_name , raw_value )
        self . all_cookies . append ( made )
        if cookie_name not in self :
            self [ cookie_name ] = made
|
def log_entry_encode ( self , id , num_logs , last_log_num , time_utc , size ) :
    '''Reply to LOG_REQUEST_LIST.

    id           : Log id (uint16_t)
    num_logs     : Total number of logs (uint16_t)
    last_log_num : High log number (uint16_t)
    time_utc     : UTC timestamp of log in seconds since 1970, or 0 if not
                   available (uint32_t)
    size         : Size of the log (may be approximate) in bytes (uint32_t)

    :return: the encoded MAVLink_log_entry_message.
    '''
    # Thin wrapper: all field packing happens in the generated message class.
    return MAVLink_log_entry_message ( id , num_logs , last_log_num , time_utc , size )
|
def _autodetect_std ( self , cmd = "" , search_patterns = None , re_flags = re . I , priority = 99 ) :
"""Standard method to try to auto - detect the device type . This method will be called for each
device _ type present in SSH _ MAPPER _ BASE dict ( ' dispatch ' key ) . It will attempt to send a
command and match some regular expression from the ouput for each entry in SSH _ MAPPER _ BASE
( ' cmd ' and ' search _ pattern ' keys ) .
Parameters
cmd : str
The command to send to the remote device after checking cache .
search _ patterns : list
A list of regular expression to look for in the command ' s output ( default : None ) .
re _ flags : re . flags , optional
Any flags from the python re module to modify the regular expression ( default : re . I ) .
priority : int , optional
The confidence the match is right between 0 and 99 ( default : 99 ) ."""
|
invalid_responses = [ r"% Invalid input detected" , r"syntax error, expecting" , r"Error: Unrecognized command" , r"%Error" , r"command not found" , r"Syntax Error: unexpected argument" , ]
if not cmd or not search_patterns :
return 0
try : # _ send _ command _ wrapper will use already cached results if available
response = self . _send_command_wrapper ( cmd )
# Look for error conditions in output
for pattern in invalid_responses :
match = re . search ( pattern , response , flags = re . I )
if match :
return 0
for pattern in search_patterns :
match = re . search ( pattern , response , flags = re_flags )
if match :
return priority
except Exception :
return 0
return 0
|
def center_end ( r , window_size ) :
    """Center a region on its end and expand it to window_size bases.

    :param r: region object with ``start`` and ``end`` attributes; it is
        shallow-copied, so the original is left unmodified.
    :param window_size: total size of the resulting region.
    :return: the new region.
    """
    res = copy . copy ( r )
    # Use floor division so coordinates stay integral: under Python 3 the
    # previous `/` produced float coordinates for odd window sizes.
    res . start = res . end - window_size // 2
    res . end = res . start + window_size
    return res
|
def create ( cls , cli , src_resource_id , dst_resource_id , max_time_out_of_sync , name = None , members = None , auto_initiate = None , hourly_snap_replication_policy = None , daily_snap_replication_policy = None , replicate_existing_snaps = None , remote_system = None , src_spa_interface = None , src_spb_interface = None , dst_spa_interface = None , dst_spb_interface = None ) :
    """Create a replication session.

    :param cli: the rest cli.
    :param src_resource_id: id of the replication source, could be
        lun/fs/cg.
    :param dst_resource_id: id of the replication destination.
    :param max_time_out_of_sync: maximum time to wait before syncing the
        source and destination. `-1` means the automatic sync is not
        performed. `0` means it is a sync replication.
    :param name: name of the replication.
    :param members: list of `UnityLunMemberReplication` objects; required
        when `src_resource` is a cg, to pair member luns between source
        and destination cg.
    :param auto_initiate: True to perform the first replication sync
        automatically, False to perform it manually.
    :param hourly_snap_replication_policy: `UnitySnapReplicationPolicy` for
        replicating hourly scheduled snaps of the source resource.
    :param daily_snap_replication_policy: `UnitySnapReplicationPolicy` for
        replicating daily scheduled snaps of the source resource.
    :param replicate_existing_snaps: whether or not to replicate snapshots
        already existing on the resource.
    :param remote_system: `UnityRemoteSystem` object, the remote system of
        remote replication.
    :param src_spa_interface: `UnityRemoteInterface` for source SPA.
    :param src_spb_interface: `UnityRemoteInterface` for source SPB.
    :param dst_spa_interface: `UnityRemoteInterface` for destination SPA.
    :param dst_spb_interface: `UnityRemoteInterface` for destination SPB.
    :return: the newly created replication session.
    """
    # make_body maps the snake_case arguments onto the REST camelCase keys.
    request_body = cli . make_body (
        srcResourceId = src_resource_id ,
        dstResourceId = dst_resource_id ,
        maxTimeOutOfSync = max_time_out_of_sync ,
        members = members ,
        autoInitiate = auto_initiate ,
        name = name ,
        hourlySnapReplicationPolicy = hourly_snap_replication_policy ,
        dailySnapReplicationPolicy = daily_snap_replication_policy ,
        replicateExistingSnaps = replicate_existing_snaps ,
        remoteSystem = remote_system ,
        srcSPAInterface = src_spa_interface ,
        srcSPBInterface = src_spb_interface ,
        dstSPAInterface = dst_spa_interface ,
        dstSPBInterface = dst_spb_interface )
    response = cli . post ( cls ( ) . resource_class , ** request_body )
    response . raise_if_err ( )
    return cls . get ( cli , response . resource_id )
|
def active_vectors_info ( self ) :
    """Return the active vectors' field and name as ``[field, name]``.

    Defaults to ``[POINT_DATA_FIELD, None]`` when no active vectors have
    been recorded yet.
    """
    if not hasattr ( self , '_active_vectors_info' ) :
        self . _active_vectors_info = [ POINT_DATA_FIELD , None ]
    # field and name
    _ , name = self . _active_vectors_info
    # rare error where scalar name isn't a valid scalar
    if name not in self . point_arrays :
        if name not in self . cell_arrays :
            # NOTE(review): `name` is reset here but the stored
            # `_active_vectors_info` is returned unchanged below, so this
            # assignment has no observable effect — possibly a latent bug;
            # confirm whether the stored entry should be reset instead.
            name = None
    return self . _active_vectors_info
|
def tree_to_dot ( tree : BubbleTree , dotfile : str = None , render : bool = False ) :
    """Write a graphviz representation of *tree*, optionally viewing it.

    See http://graphviz.readthedocs.io/en/latest/examples.html#cluster-py
    for the graphviz API.

    :param tree: BubbleTree instance equivalent to the bubble file graph.
    :param dotfile: target dot file path; nothing is saved when falsy.
    :param render: when True, also open the rendered graph in a viewer.
    :return: path of the saved dot file, or None when no dotfile was given.
    """
    graph = tree_to_graph ( tree )
    saved_path = graph . save ( dotfile ) if dotfile else None
    if render :
        # As the dot file is known by the Graph object, the rendered output
        # will be placed around the dot file.
        graph . view ( )
    return saved_path
|
def _record_count ( self ) :
    """Get number of records in file.

    Seeks to the end of the file to measure the total data length, then
    divides by the fixed record length, after discarding any blank 8-byte
    padding found in the final 80-byte card.

    This is maybe suboptimal because we have to seek to the end of
    the file.

    Side effect: returns file position to record_start.
    """
    # Seek to EOF to measure how many bytes of record data follow the header.
    self . filepath_or_buffer . seek ( 0 , 2 )
    total_records_length = ( self . filepath_or_buffer . tell ( ) - self . record_start )
    # xport data is written in 80-byte "cards"; any remainder suggests
    # truncation or corruption.
    if total_records_length % 80 != 0 :
        warnings . warn ( "xport file may be corrupted" )
    # Records longer than one card cannot hide inside tail padding.
    if self . record_length > 80 :
        self . filepath_or_buffer . seek ( self . record_start )
        return total_records_length // self . record_length
    # Inspect the last 80-byte card for trailing blank padding.
    self . filepath_or_buffer . seek ( - 80 , 2 )
    last_card = self . filepath_or_buffer . read ( 80 )
    last_card = np . frombuffer ( last_card , dtype = np . uint64 )
    # 8 byte blank: 2314885530818453536 == 0x2020202020202020, i.e. eight
    # ASCII spaces packed into one little-endian uint64.
    ix = np . flatnonzero ( last_card == 2314885530818453536 )
    if len ( ix ) == 0 :
        tail_pad = 0
    else :
        tail_pad = 8 * len ( ix )
    self . filepath_or_buffer . seek ( self . record_start )
    return ( total_records_length - tail_pad ) // self . record_length
|
def fromobj ( obj ) :
    """Create an Oid object from a pointer to an ASN1_OBJECT C structure.

    Intended for internal use by submodules dealing with libcrypto ASN.1
    parsing functions, such as x509 or CMS.
    """
    nid = libcrypto . OBJ_obj2nid ( obj )
    if nid != 0 :
        # Known object: look it up directly by numeric identifier.
        return Oid ( nid )
    # Unknown to libcrypto: register it under its dotted-decimal form.
    buf = create_string_buffer ( 80 )
    dotted_len = libcrypto . OBJ_obj2txt ( buf , 80 , obj , 1 )
    dotted = buf [ : dotted_len ]
    return create ( dotted , dotted , dotted )
|
def get_readme ( ) :
'Get the long description from the README file'
|
here = path . abspath ( path . dirname ( __file__ ) )
with open ( path . join ( here , 'README.rst' ) , encoding = 'utf-8' ) as my_fd :
result = my_fd . read ( )
return result
|
def writeSwarmDescription ( self , csvPath , outPath , predictedField = None , swarmParams = None ) :
    """Write a swarm description file (JSON).

    :param csvPath: path to CSV data
    :param outPath: absolute or relative file path to write swarm JSON file
    :param predictedField: (string) field to predict; defaults to the one
        configured on this instance
    :param swarmParams: (dict) overrides any swarm params
    :raises Exception: if no Confluence data has been loaded yet
    """
    if self . _confluence is None :
        raise Exception ( "Missing Confluence! Cannot attempt operation requiring " "data without first loading the data." )
    target_field = self . _predictedField if predictedField is None else predictedField
    field_description = self . _createFieldDescription ( )
    swarm_description = createSwarmDescription ( field_description , csvPath , target_field , swarmParams = swarmParams )
    with open ( outPath , "w" ) as out_file :
        out_file . write ( json . dumps ( swarm_description ) )
|
def transfer ( cls , inputs , recipients , asset_id , metadata = None ) :
    """A simple way to generate a `TRANSFER` transaction.

    Note:
        Combining multiple `inputs` with an arbitrary number of
        `recipients` can yield interesting threshold conditions:

        1. The index of a `recipient` corresponds to the index of an
           input: e.g. `transfer([input1], [a])` means `input1` would
           now be owned by user `a`.
        2. `recipients` can get (almost) arbitrarily deeply nested,
           creating complex threshold conditions: e.g.
           `transfer([inp1, inp2], [[a, [b, c]], d])` means `a`'s
           signature would have a 50% weight on `inp1` compared to `b`
           and `c` that share 25% of the leftover weight respectively,
           while `inp2` is owned completely by `d`.

    Args:
        inputs (:obj:`list` of :class:`~bigchaindb.common.transaction.
            Input`): Converted `Output`s, intended to be used as inputs
            in the transfer to generate.
        recipients (:obj:`list` of :obj:`tuple`): A list of
            ([keys], amount) that represent the recipients of this
            Transaction.
        asset_id (str): The asset ID of the asset to be transferred in
            this Transaction.
        metadata (dict): Python dictionary to be stored along with the
            Transaction.

    Returns:
        :class:`~bigchaindb.common.transaction.Transaction`

    Raises:
        TypeError: If `inputs` or `recipients` is not a list, or
            `asset_id` is not a string.
        ValueError: If `inputs` or `recipients` is empty, or a recipient
            is not a `([keys], amount)` tuple.
    """
    if not isinstance ( inputs , list ) :
        raise TypeError ( '`inputs` must be a list instance' )
    if not inputs :
        raise ValueError ( '`inputs` must contain at least one item' )
    if not isinstance ( recipients , list ) :
        raise TypeError ( '`recipients` must be a list instance' )
    if not recipients :
        raise ValueError ( '`recipients` list cannot be empty' )
    outputs = [ ]
    for entry in recipients :
        if not ( isinstance ( entry , tuple ) and len ( entry ) == 2 ) :
            raise ValueError ( ( 'Each `recipient` in the list must be a' ' tuple of `([<list of public keys>],' ' <amount>)`' ) )
        public_keys , amount = entry
        outputs . append ( Output . generate ( public_keys , amount ) )
    if not isinstance ( asset_id , str ) :
        raise TypeError ( '`asset_id` must be a string' )
    # Deep-copy so the caller's input objects are never mutated.
    return cls ( cls . TRANSFER , { 'id' : asset_id } , deepcopy ( inputs ) , outputs , metadata )
|
def set_ytick_suffix ( self , suffix ) :
    """Set the suffix appended to each y-axis tick label.

    :param suffix: string added after each tick. The special values
        ``degree`` and ``percent`` are replaced by the corresponding
        LaTeX symbols.
    """
    symbol_map = { 'degree' : r'^\circ' , 'percent' : r'\%' }
    self . ticks [ 'ysuffix' ] = symbol_map . get ( suffix , suffix )
|
def _check_linux ( self , instance ) :
    """Collect Linux network metrics from procfs and conntrack.

    _check_linux can be run inside a container and still collect the network
    metrics from the host; for that procfs_path can be set to something like
    "/host/proc". When a custom procfs_path is set, the
    collect_connection_state option is ignored.

    :param instance: check instance config dict (tags, conntrack settings).
    """
    proc_location = self . agentConfig . get ( 'procfs_path' , '/proc' ) . rstrip ( '/' )
    custom_tags = instance . get ( 'tags' , [ ] )
    # Inside a container with a mounted host /proc, use PID 1's namespace view.
    if Platform . is_containerized ( ) and proc_location != "/proc" :
        proc_location = "%s/1" % proc_location
    if self . _is_collect_cx_state_runnable ( proc_location ) :
        try :
            self . log . debug ( "Using `ss` to collect connection state" )
            # Try using `ss` for increased performance over `netstat`
            for ip_version in [ '4' , '6' ] :
                for protocol in [ 'tcp' , 'udp' ] :
                    # Call `ss` for each IP version because there's no built-in
                    # way of distinguishing between the IP versions in the output.
                    # Also calls `ss` for each protocol, because on some systems
                    # (e.g. Ubuntu 14.04), there is a bug that prints `tcp` even
                    # if it's `udp`.
                    output , _ , _ = get_subprocess_output ( [ "ss" , "-n" , "-{0}" . format ( protocol [ 0 ] ) , "-a" , "-{0}" . format ( ip_version ) ] , self . log )
                    lines = output . splitlines ( )
                    # Sample `ss` output:
                    # State      Recv-Q   Send-Q   Local Address:Port   Peer Address:Port
                    # UNCONN     0        0        127.0.0.1:8125       *:*
                    # ESTAB      0        0        127.0.0.1:37036      127.0.0.1:8125
                    # UNCONN     0        0        fe80::a00:27ff:fe1c:3c4:123  :::*
                    # TIME-WAIT  0        0        90.56.111.177:56867  46.105.75.4:143
                    # LISTEN     0        0        ::ffff:127.0.0.1:33217  ::ffff:127.0.0.1:7199
                    # ESTAB      0        0        ::ffff:127.0.0.1:58975  ::ffff:127.0.0.1:2181
                    metrics = self . _parse_linux_cx_state ( lines [ 1 : ] , self . tcp_states [ 'ss' ] , 0 , protocol = protocol , ip_version = ip_version )
                    # Only send the metrics which match the loop iteration's
                    # ip version and protocol.
                    for stat , metric in iteritems ( self . cx_state_gauge ) :
                        if stat [ 0 ] . endswith ( ip_version ) and stat [ 0 ] . startswith ( protocol ) :
                            self . gauge ( metric , metrics . get ( metric ) , tags = custom_tags )
        except OSError :
            self . log . info ( "`ss` not found: using `netstat` as a fallback" )
            output , _ , _ = get_subprocess_output ( [ "netstat" , "-n" , "-u" , "-t" , "-a" ] , self . log )
            lines = output . splitlines ( )
            # Sample `netstat` output:
            # Active Internet connections (w/o servers)
            # Proto Recv-Q Send-Q Local Address        Foreign Address      State
            # tcp   0      0      46.105.75.4:80       79.220.227.193:2032  SYN_RECV
            # tcp   0      0      46.105.75.4:143      90.56.111.177:56867  ESTABLISHED
            # tcp   0      0      46.105.75.4:50468    107.20.207.175:443   TIME_WAIT
            # tcp6  0      0      46.105.75.4:80       93.15.237.188:58038  FIN_WAIT2
            # tcp6  0      0      46.105.75.4:80       79.220.227.193:2029  ESTABLISHED
            # udp   0      0      0.0.0.0:123          0.0.0.0:*
            # udp6  0      0      :::41458             :::*
            metrics = self . _parse_linux_cx_state ( lines [ 2 : ] , self . tcp_states [ 'netstat' ] , 5 )
            for metric , value in iteritems ( metrics ) :
                self . gauge ( metric , value , tags = custom_tags )
        except SubprocessOutputEmptyError :
            self . log . exception ( "Error collecting connection stats." )
    # Per-interface counters come from /proc/net/dev.
    proc_dev_path = "{}/net/dev" . format ( proc_location )
    with open ( proc_dev_path , 'r' ) as proc :
        lines = proc . readlines ( )
    # Inter-|   Receive                                                 |  Transmit
    #  face |bytes     packets errs drop fifo frame compressed multicast|bytes    packets errs drop fifo colls carrier compressed # noqa: E501
    #     lo:45890956   112797   0    0    0     0          0         0 45890956   112797    0    0    0     0       0          0 # noqa: E501
    #   eth0:631947052 1042233   0   19    0   184          0      1206 1208625538 1320529   0    0    0     0       0          0 # noqa: E501
    #   eth1:0               0   0    0    0     0          0         0          0       0   0    0    0     0       0          0 # noqa: E501
    for l in lines [ 2 : ] :
        cols = l . split ( ':' , 1 )
        x = cols [ 1 ] . split ( )
        # Filter inactive interfaces (zero bytes received and sent).
        if self . _parse_value ( x [ 0 ] ) or self . _parse_value ( x [ 8 ] ) :
            iface = cols [ 0 ] . strip ( )
            metrics = { 'bytes_rcvd' : self . _parse_value ( x [ 0 ] ) , 'bytes_sent' : self . _parse_value ( x [ 8 ] ) , 'packets_in.count' : self . _parse_value ( x [ 1 ] ) , 'packets_in.error' : self . _parse_value ( x [ 2 ] ) + self . _parse_value ( x [ 3 ] ) , 'packets_out.count' : self . _parse_value ( x [ 9 ] ) , 'packets_out.error' : self . _parse_value ( x [ 10 ] ) + self . _parse_value ( x [ 11 ] ) , }
            self . _submit_devicemetrics ( iface , metrics , custom_tags )
    # Protocol-level counters come from /proc/net/netstat and /proc/net/snmp,
    # which alternate header lines ("Category: name name ...") with value lines.
    netstat_data = { }
    for f in [ 'netstat' , 'snmp' ] :
        proc_data_path = "{}/net/{}" . format ( proc_location , f )
        try :
            with open ( proc_data_path , 'r' ) as netstat :
                while True :
                    n_header = netstat . readline ( )
                    if not n_header :
                        break
                    # No more? Abort!
                    n_data = netstat . readline ( )
                    h_parts = n_header . strip ( ) . split ( ' ' )
                    h_values = n_data . strip ( ) . split ( ' ' )
                    # Strip the trailing ':' from the category name.
                    ns_category = h_parts [ 0 ] [ : - 1 ]
                    netstat_data [ ns_category ] = { }
                    # Turn the data into a dictionary
                    for idx , hpart in enumerate ( h_parts [ 1 : ] ) :
                        netstat_data [ ns_category ] [ hpart ] = h_values [ idx + 1 ]
        except IOError :
            # On Openshift, /proc/net/snmp is only readable by root
            self . log . debug ( "Unable to read %s." , proc_data_path )
    # Mapping from /proc counter names to datadog metric names.
    nstat_metrics_names = { 'Tcp' : { 'RetransSegs' : 'system.net.tcp.retrans_segs' , 'InSegs' : 'system.net.tcp.in_segs' , 'OutSegs' : 'system.net.tcp.out_segs' , } , 'TcpExt' : { 'ListenOverflows' : 'system.net.tcp.listen_overflows' , 'ListenDrops' : 'system.net.tcp.listen_drops' , 'TCPBacklogDrop' : 'system.net.tcp.backlog_drops' , 'TCPRetransFail' : 'system.net.tcp.failed_retransmits' , } , 'Udp' : { 'InDatagrams' : 'system.net.udp.in_datagrams' , 'NoPorts' : 'system.net.udp.no_ports' , 'InErrors' : 'system.net.udp.in_errors' , 'OutDatagrams' : 'system.net.udp.out_datagrams' , 'RcvbufErrors' : 'system.net.udp.rcv_buf_errors' , 'SndbufErrors' : 'system.net.udp.snd_buf_errors' , 'InCsumErrors' : 'system.net.udp.in_csum_errors' , } , }
    for k in nstat_metrics_names :
        for met in nstat_metrics_names [ k ] :
            if met in netstat_data . get ( k , { } ) :
                self . _submit_netmetric ( nstat_metrics_names [ k ] [ met ] , self . _parse_value ( netstat_data [ k ] [ met ] ) , tags = custom_tags )
    # Get the conntrack -S information
    conntrack_path = instance . get ( 'conntrack_path' )
    if conntrack_path is not None :
        self . _add_conntrack_stats_metrics ( conntrack_path , custom_tags )
    # Get the rest of the metrics by reading the files. Metrics available since kernel 3.6
    conntrack_files_location = os . path . join ( proc_location , 'sys' , 'net' , 'netfilter' )
    # By default, only max and count are reported. However if the blacklist is set,
    # the whitelist is losing its default value
    blacklisted_files = instance . get ( 'blacklist_conntrack_metrics' )
    whitelisted_files = instance . get ( 'whitelist_conntrack_metrics' )
    if blacklisted_files is None and whitelisted_files is None :
        whitelisted_files = [ 'max' , 'count' ]
    available_files = [ ]
    # Get the metrics to read
    try :
        for metric_file in os . listdir ( conntrack_files_location ) :
            if ( os . path . isfile ( os . path . join ( conntrack_files_location , metric_file ) ) and 'nf_conntrack_' in metric_file ) :
                available_files . append ( metric_file [ len ( 'nf_conntrack_' ) : ] )
    except Exception as e :
        self . log . debug ( "Unable to list the files in {}. {}" . format ( conntrack_files_location , e ) )
    filtered_available_files = pattern_filter ( available_files , whitelist = whitelisted_files , blacklist = blacklisted_files )
    for metric_name in filtered_available_files :
        metric_file_location = os . path . join ( conntrack_files_location , 'nf_conntrack_{}' . format ( metric_name ) )
        try :
            with open ( metric_file_location , 'r' ) as conntrack_file :
                # Checking it's an integer
                try :
                    value = int ( conntrack_file . read ( ) . rstrip ( ) )
                    self . gauge ( 'system.net.conntrack.{}' . format ( metric_name ) , value , tags = custom_tags )
                except ValueError :
                    self . log . debug ( "{} is not an integer" . format ( metric_name ) )
        except IOError as e :
            self . log . debug ( "Unable to read {}, skipping {}." . format ( metric_file_location , e ) )
|
def get_kba_values ( kb_name , searchname = "" , searchtype = "s" ) :
    """Return values from an "authority file" type knowledge base (values only).

    :param kb_name: name of kb
    :param searchname: get these values, according to searchtype
    :param searchtype: s=substring, e=exact, sw=startswith
    """
    # Build the SQL LIKE pattern according to the requested search type.
    if searchname :
        if searchtype == 's' :
            searchname = '%' + searchname + '%'
        elif searchtype == 'sw' :
            # startswith
            searchname = searchname + '%'
    else :
        searchname = '%'
    matches = db . session . query ( models . KnwKBRVAL ) . join ( models . KnwKB ) . filter ( models . KnwKBRVAL . m_value . like ( searchname ) , models . KnwKB . name . like ( kb_name ) )
    return [ ( row . m_value , ) for row in matches . all ( ) ]
|
def check_result ( running , recurse = False , highstate = None ) :
    '''Check the total return value of the run and determine if the running
    dict has any issues.

    :param running: dict mapping state IDs to their result dicts (the
        ``__extend__`` entry maps to a list instead).
    :param recurse: set on recursive calls; skips the per-entry type check.
    :param highstate: optional highstate data, forwarded to the onfail check.
    :return: True when every state succeeded or its failure is excused by an
        onfail requisite, otherwise False.
    '''
    if not isinstance ( running , dict ) :
        return False
    if not running :
        return False
    ret = True
    for state_id , state_result in six . iteritems ( running ) :
        expected_type = dict
        # The __extend__ state is a list
        if "__extend__" == state_id :
            expected_type = list
        if not recurse and not isinstance ( state_result , expected_type ) :
            ret = False
        if ret and isinstance ( state_result , dict ) :
            # `_empty` is a module-level sentinel distinguishing "no result
            # key" from an explicit result value of None.
            result = state_result . get ( 'result' , _empty )
            if result is False :
                ret = False
            # only override return value if we are not already failed
            elif result is _empty and isinstance ( state_result , dict ) and ret :
                ret = check_result ( state_result , recurse = True , highstate = highstate )
        # if we detect a fail, check for onfail requisites
        if not ret :
            # ret can be None in case of no onfail reqs, recast it to bool
            ret = bool ( check_onfail_requisites ( state_id , state_result , running , highstate ) )
        # return as soon as we got a failure
        if not ret :
            break
    return ret
|
def _readtoken ( self , name , pos , length ) :
    """Reads a token from the bitstring and returns the result.

    :param name: token type name, used to look up the reader function in the
        module-level ``name_to_read`` dispatch table.
    :param pos: bit position to start reading from.
    :param length: number of bits to read, or None for self-delimiting
        token types.
    :return: tuple of (value, new bit position).
    :raises ReadError: if the requested length overruns the available data.
    :raises ValueError: if the token name is unknown.
    """
    if length is not None and int ( length ) > self . length - pos :
        raise ReadError ( "Reading off the end of the data. " "Tried to read {0} bits when only {1} available." . format ( int ( length ) , self . length - pos ) )
    try :
        val = name_to_read [ name ] ( self , length , pos )
        return val , pos + length
    except KeyError :
        if name == 'pad' :
            # 'pad' tokens consume bits but produce no value.
            return None , pos + length
        raise ValueError ( "Can't parse token {0}:{1}" . format ( name , length ) )
    except TypeError :
        # This is for the 'ue', 'se' and 'bool' tokens, whose readers take
        # only (self, pos). They will also return the new pos.
        return name_to_read [ name ] ( self , pos )
|
def edit_message_media(self, chat_id: Union[int, str], message_id: int, media: InputMedia, reply_markup: "pyrogram.InlineKeyboardMarkup" = None) -> "pyrogram.Message":
    """Use this method to edit audio, document, photo, or video messages.

    If a message is a part of a message album, then it can be edited only to a photo or a video. Otherwise,
    message type can be changed arbitrarily. When an inline message is edited, a new file can't be uploaded.
    Use a previously uploaded file via its file_id or specify a URL.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.
            For your personal cloud (Saved Messages) you can simply use "me" or "self".
            For a contact that exists in your Telegram address book you can use his phone number (str).

        message_id (``int``):
            Message identifier in the chat specified in chat_id.

        media (:obj:`InputMedia`):
            One of the InputMedia objects describing an animation, audio, document, photo or video.

        reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
            An InlineKeyboardMarkup object.

    Returns:
        On success, the edited :obj:`Message <pyrogram.Message>` is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Pick the caption parser declared on the InputMedia object.
    # NOTE(review): assumes media.parse_mode is a string — confirm InputMedia guarantees a default.
    style = self.html if media.parse_mode.lower() == "html" else self.markdown
    caption = media.caption
    # Each branch below normalizes `media` into a raw Telegram types.InputMedia*.
    # Once a branch fires, `media` is no longer an InputMedia* wrapper, so the
    # later isinstance checks cannot match again — at most one branch runs.
    if isinstance(media, InputMediaPhoto):
        if os.path.exists(media.media):
            # Local file path: upload it first, then reference the uploaded photo.
            media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedPhoto(file=self.save_file(media.media))))
            media = types.InputMediaPhoto(id=types.InputPhoto(id=media.photo.id, access_hash=media.photo.access_hash, file_reference=b""))
        elif media.media.startswith("http"):
            # Remote URL: let Telegram fetch it.
            media = types.InputMediaPhotoExternal(url=media.media)
        else:
            # Otherwise treat media.media as an encoded file_id string.
            try:
                decoded = utils.decode(media.media)
                # Two file_id layouts exist; the longer one carries extra fields.
                fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                unpacked = struct.unpack(fmt, decoded)
            except (AssertionError, binascii.Error, struct.error):
                raise FileIdInvalid from None
            else:
                # This branch only accepts media type id 2 (photo).
                if unpacked[0] != 2:
                    media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                    if media_type:
                        raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                    else:
                        raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                media = types.InputMediaPhoto(id=types.InputPhoto(id=unpacked[2], access_hash=unpacked[3], file_reference=b""))
    if isinstance(media, InputMediaVideo):
        if os.path.exists(media.media):
            # Local video: upload with video attributes (duration/size/streaming).
            media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument(mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[types.DocumentAttributeVideo(supports_streaming=media.supports_streaming or None, duration=media.duration, w=media.width, h=media.height), types.DocumentAttributeFilename(file_name=os.path.basename(media.media))])))
            media = types.InputMediaDocument(id=types.InputDocument(id=media.document.id, access_hash=media.document.access_hash, file_reference=b""))
        elif media.media.startswith("http"):
            media = types.InputMediaDocumentExternal(url=media.media)
        else:
            try:
                decoded = utils.decode(media.media)
                fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                unpacked = struct.unpack(fmt, decoded)
            except (AssertionError, binascii.Error, struct.error):
                raise FileIdInvalid from None
            else:
                # This branch only accepts media type id 4 (video).
                if unpacked[0] != 4:
                    media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                    if media_type:
                        raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                    else:
                        raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                media = types.InputMediaDocument(id=types.InputDocument(id=unpacked[2], access_hash=unpacked[3], file_reference=b""))
    if isinstance(media, InputMediaAudio):
        if os.path.exists(media.media):
            # Local audio: upload with audio attributes (duration/performer/title).
            media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument(mime_type=self.guess_mime_type(media.media) or "audio/mpeg", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[types.DocumentAttributeAudio(duration=media.duration, performer=media.performer, title=media.title), types.DocumentAttributeFilename(file_name=os.path.basename(media.media))])))
            media = types.InputMediaDocument(id=types.InputDocument(id=media.document.id, access_hash=media.document.access_hash, file_reference=b""))
        elif media.media.startswith("http"):
            media = types.InputMediaDocumentExternal(url=media.media)
        else:
            try:
                decoded = utils.decode(media.media)
                fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                unpacked = struct.unpack(fmt, decoded)
            except (AssertionError, binascii.Error, struct.error):
                raise FileIdInvalid from None
            else:
                # This branch only accepts media type id 9 (audio).
                if unpacked[0] != 9:
                    media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                    if media_type:
                        raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                    else:
                        raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                media = types.InputMediaDocument(id=types.InputDocument(id=unpacked[2], access_hash=unpacked[3], file_reference=b""))
    if isinstance(media, InputMediaAnimation):
        if os.path.exists(media.media):
            # Local animation: video attributes plus the Animated marker attribute.
            media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument(mime_type=self.guess_mime_type(media.media) or "video/mp4", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[types.DocumentAttributeVideo(supports_streaming=True, duration=media.duration, w=media.width, h=media.height), types.DocumentAttributeFilename(file_name=os.path.basename(media.media)), types.DocumentAttributeAnimated()])))
            media = types.InputMediaDocument(id=types.InputDocument(id=media.document.id, access_hash=media.document.access_hash, file_reference=b""))
        elif media.media.startswith("http"):
            media = types.InputMediaDocumentExternal(url=media.media)
        else:
            try:
                decoded = utils.decode(media.media)
                fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                unpacked = struct.unpack(fmt, decoded)
            except (AssertionError, binascii.Error, struct.error):
                raise FileIdInvalid from None
            else:
                # This branch only accepts media type id 10 (animation).
                if unpacked[0] != 10:
                    media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                    if media_type:
                        raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                    else:
                        raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                media = types.InputMediaDocument(id=types.InputDocument(id=unpacked[2], access_hash=unpacked[3], file_reference=b""))
    if isinstance(media, InputMediaDocument):
        if os.path.exists(media.media):
            # Local document: plain document upload with just a filename attribute.
            media = self.send(functions.messages.UploadMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaUploadedDocument(mime_type=self.guess_mime_type(media.media) or "application/zip", thumb=None if media.thumb is None else self.save_file(media.thumb), file=self.save_file(media.media), attributes=[types.DocumentAttributeFilename(file_name=os.path.basename(media.media))])))
            media = types.InputMediaDocument(id=types.InputDocument(id=media.document.id, access_hash=media.document.access_hash, file_reference=b""))
        elif media.media.startswith("http"):
            media = types.InputMediaDocumentExternal(url=media.media)
        else:
            try:
                decoded = utils.decode(media.media)
                fmt = "<iiqqqqi" if len(decoded) > 24 else "<iiqq"
                unpacked = struct.unpack(fmt, decoded)
            except (AssertionError, binascii.Error, struct.error):
                raise FileIdInvalid from None
            else:
                # This branch accepts media type ids 5 or 10.
                if unpacked[0] not in (5, 10):
                    media_type = BaseClient.MEDIA_TYPE_ID.get(unpacked[0], None)
                    if media_type:
                        raise FileIdInvalid("The file_id belongs to a {}".format(media_type))
                    else:
                        raise FileIdInvalid("Unknown media type: {}".format(unpacked[0]))
                media = types.InputMediaDocument(id=types.InputDocument(id=unpacked[2], access_hash=unpacked[3], file_reference=b""))
    # Perform the edit with the normalized media and the parsed caption entities.
    r = self.send(functions.messages.EditMessage(peer=self.resolve_peer(chat_id), id=message_id, reply_markup=reply_markup.write() if reply_markup else None, media=media, **style.parse(caption)))
    for i in r.updates:
        if isinstance(i, (types.UpdateEditMessage, types.UpdateEditChannelMessage)):
            return pyrogram.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats})
    # NOTE(review): if no edit update is present in r.updates this falls
    # through and implicitly returns None — confirm whether that is intended.
|
def send_query(self, query):
    '''Forward *query* to the Solr submodule.

    Raises :class:`esgfpid.exceptions.SolrSwitchedOff` when the Solr
    module is disabled instead of silently dropping the query.
    '''
    if not self.__switched_on:
        msg = 'Not sending query'
        LOGGER.debug(msg)
        raise esgfpid.exceptions.SolrSwitchedOff(msg)
    return self.__solr_server_connector.send_query(query)
|
def open(filename, frame='unspecified'):
    """Create a NormalCloudImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg,
        .npy, or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new
        image lies.

    Returns
    -------
    :obj:`NormalCloudImage`
        The new NormalCloudImage.
    """
    return NormalCloudImage(Image.load_data(filename), frame)
|
def push_external_commands(self, commands):
    """POST the given external commands to the satellite
    (/_run_external_commands endpoint).

    :param commands: external commands to forward
    :type commands: list
    :return: True on success, False on failure
    :rtype: bool
    """
    logger.debug("Pushing %d external commands", len(commands))
    payload = {'cmds': commands}
    return self.con.post('_run_external_commands', payload, wait=True)
|
def p_iteration_statement_1 ( self , p ) :
    """iteration _ statement : DO statement WHILE LPAREN expr RPAREN SEMI
    | DO statement WHILE LPAREN expr RPAREN AUTOSEMI"""
    # NOTE: the docstring above is not documentation — it is the grammar
    # production consumed by ply.yacc; do not edit its text. (The spacing
    # shown may be a transcription artifact — verify against the original.)
    # Build a do-while AST node: p[2] is the loop body, p[5] the condition.
    p [ 0 ] = self . asttypes . DoWhile ( predicate = p [ 5 ] , statement = p [ 2 ] )
    # Attach source position information to the new node.
    p [ 0 ] . setpos ( p )
|
def get_sphinx_doc(self, name, depth=None, exclude=None, width=72, error=False, raised=False, no_comment=False):
    r"""Return an exception list marked up in `reStructuredText`_.

    :param name: Name of the callable (method, function or class
     property) to generate exceptions documentation for
    :type name: string

    :param depth: Hierarchy levels to include in the exceptions list
     (overrides default **depth** argument; see
     :py:attr:`pexdoc.ExDoc.depth`). If None exceptions at all depths
     are included
    :type depth: non-negative integer or None

    :param exclude: List of (potentially partial) module and callable
     names to exclude from exceptions list (overrides default
     **exclude** argument; see :py:attr:`pexdoc.ExDoc.exclude`). If
     None all callables are included
    :type exclude: list of strings or None

    :param width: Maximum width of the lines of text (minimum 40)
    :type width: integer

    :param error: Flag that indicates whether an exception should be
     raised if the callable is not found in the callables exceptions
     database (True) or not (False)
    :type error: boolean

    :param raised: Flag that indicates whether only exceptions that
     were raised (and presumably caught) should be documented (True)
     or all registered exceptions should be documented (False)
    :type raised: boolean

    :param no_comment: Flag that indicates whether a
     `reStructuredText`_ comment labeling the callable (method,
     function or class property) should be printed (False) or not
     (True) before the exceptions documentation
    :type no_comment: boolean

    :raises:
     * RuntimeError (Argument \`depth\` is not valid)

     * RuntimeError (Argument \`error\` is not valid)

     * RuntimeError (Argument \`exclude\` is not valid)

     * RuntimeError (Argument \`no_comment\` is not valid)

     * RuntimeError (Argument \`raised\` is not valid)

     * RuntimeError (Argument \`width\` is not valid)

     * RuntimeError (Callable not found in exception list: *[name]*)
    """
    # pylint: disable=R0101,R0204,R0912,R0915,R0916
    # --- Argument validation -------------------------------------------
    if depth and ((not isinstance(depth, int)) or (isinstance(depth, int) and (depth < 0))):
        raise RuntimeError("Argument `depth` is not valid")
    if exclude and ((not isinstance(exclude, list)) or (isinstance(exclude, list) and any([not isinstance(item, str) for item in exclude]))):
        raise RuntimeError("Argument `exclude` is not valid")
    if (not isinstance(width, int)) or (isinstance(width, int) and (width < _MINWIDTH)):
        raise RuntimeError("Argument `width` is not valid")
    if not isinstance(error, bool):
        raise RuntimeError("Argument `error` is not valid")
    if not isinstance(raised, bool):
        raise RuntimeError("Argument `raised` is not valid")
    if not isinstance(no_comment, bool):
        # BUG FIX: this message previously (and wrongly) said `raised`.
        raise RuntimeError("Argument `no_comment` is not valid")
    depth = self._depth if depth is None else depth
    exclude = self._exclude if not exclude else exclude
    callable_dict = {}
    prop = False
    # Try to find "regular" callable. The trace may have several calls
    # to the same callable, capturing potentially different exceptions
    # or behaviors, thus capture them all
    instances = self._tobj.search_tree(name)
    if instances:
        callable_dict[name] = {"type": "regular", "instances": instances}
    else:
        # Try to find property callable (getter/setter/deleter actions).
        for action in ["getter", "setter", "deleter"]:
            prop_name = "{name}({action})".format(name=name, action=action)
            instances = self._tobj.search_tree(prop_name)
            if instances:
                callable_dict[prop_name] = {"type": action, "instances": instances}
                prop = True
    if error and (not callable_dict):
        raise RuntimeError("Callable not found in exception list: {callable}".format(callable=name))
    if not callable_dict:
        # Callable did not register any exception.
        return ""
    # --- Build exception table honoring depth/exclude/raised -----------
    sep = self._tobj.node_separator
    dkeys = []
    for key, name_dict in callable_dict.items():
        exlist = []
        for callable_root in name_dict["instances"]:
            # Find callable tree depth; this is the reference level
            # (depth = 0) for the depth argument.
            rlevel = callable_root[:callable_root.index(name)].count(sep)
            # Create a list of tuples with the full node name of each node
            # that contains the callable name (to find exceptions in tree)
            # and the path underneath the callable appearance on the
            # callable tree, split by tree path separator (to determine if
            # exception should be added based on depth and exclusion list).
            nodes = self._tobj.get_subtree(callable_root)
            tnodes = [(node, sep.join(node.split(sep)[rlevel:])) for node in nodes]
            for fnode, rnode in tnodes:
                data = self._tobj._get_data(fnode)
                if (data and ((depth is None) or ((depth is not None) and (rnode.count(sep) <= depth))) and ((not exclude) or (not any([item in rnode for item in exclude])))):
                    for exc in data:
                        msg = self._process_exlist(exc, raised)
                        if msg is not None:
                            exlist.append(msg)
        if exlist:
            name_dict["exlist"] = list(set(exlist[:]))
        else:
            # A callable can have registered exceptions but none of them
            # may meet the depth and exclude specification; in this case
            # the entry should be deleted from the dictionary.
            dkeys.append(key)
    for key in dkeys:
        del callable_dict[key]
    if not callable_dict:
        # Callable had registered exceptions but not a single one of
        # those was raised.
        return ""
    # --- Generate final output -----------------------------------------
    if no_comment:
        exoutput = [""]
    else:
        template = ".. Auto-generated exceptions documentation for {callable}"
        exoutput = [_format_msg(template.format(callable=name), width, prefix=".. ")]
        exoutput.extend([""])
    desc_dict = {"getter": "retrieved", "setter": "assigned", "deleter": "deleted"}
    if prop:
        if len(callable_dict) == 1:
            # For a property that raises exceptions on one and only one
            # action (set, get or delete) the format when there is only
            # one exception is (with get as an example action):
            #     :raises: (when retrieved) RuntimeError (Invalid option)
            # If there are multiple exceptions:
            #     :raises: (when retrieved)
            #      * RuntimeError (Invalid options)
            #      * TypeError (Wrong type)
            callable_root = next(iter(callable_dict))
            action = callable_dict[callable_root]["type"]
            desc = desc_dict[action]
            exlist = set(callable_dict[callable_root]["exlist"])
            exlength = len(exlist)
            indent = 1 if exlength == 1 else 3
            template = ":raises: (when {action})\n\n".format(action=desc)
            prefix = (template.strip() + " ") if exlength == 1 else " * "
            fexlist = [_format_msg("{prefix}{name}".format(prefix=prefix, name=name), width, indent) for name in sorted(list(exlist))]
            exoutput.extend([(template if exlength > 1 else "") + "\n\n".join(fexlist)])
        else:
            # For a property that raises exceptions on more than one
            # action (set, get or delete) the format is:
            #     :raises:
            #      * When assigned:
            #        * RuntimeError (Invalid options)
            #      * When retrieved:
            #        * RuntimeError (Null object)
            exoutput.append(":raises:")
            for action in ["setter", "deleter", "getter"]:
                desc = desc_dict[action]
                for callable_root in callable_dict:
                    if callable_dict[callable_root]["type"] == action:
                        exlist = set(callable_dict[callable_root]["exlist"])
                        fexlist = [_format_msg(" * {name}".format(name=name), width, 5) for name in sorted(list(exlist))]
                        exoutput.extend([" * When {action}\n\n".format(action=desc) + "\n\n".join(fexlist) + "\n"])
    else:
        # For a regular callable (function or method) that raises only
        # one exception the format is:
        #     :raises: RuntimeError (Invalid options)
        # With multiple exceptions:
        #     :raises:
        #      * RuntimeError (Invalid options)
        #      * RuntimeError (Null object)
        exlist = set(callable_dict[next(iter(callable_dict))]["exlist"])
        exlength = len(exlist)
        indent = 1 if exlength == 1 else 3
        prefix = ":raises: " if exlength == 1 else " * "
        fexlist = [_format_msg("{prefix}{name}".format(prefix=prefix, name=name), width, indent) for name in sorted(list(exlist))]
        exoutput.extend([(":raises:\n" if exlength > 1 else "") + "\n\n".join(fexlist)])
    exoutput[-1] = "{line}\n\n".format(line=exoutput[-1].rstrip())
    return ("\n".join(exoutput)) if exoutput else ""
|
def get_dummy_dataloader(dataloader, target_shape):
    """Return a dummy data loader which endlessly yields one fixed batch
    whose first element has the requested target shape."""
    batches = iter(dataloader)
    data_batch = next(batches)
    logging.debug('Searching target batch shape: %s', target_shape)
    # Walk the loader until a batch with the desired first-element shape is found.
    while data_batch[0].shape != target_shape:
        logging.debug('Skip batch with shape %s', data_batch[0].shape)
        data_batch = next(batches)
    logging.debug('Found target dummy batch.')

    class DummyIter():
        """Iterable that yields the captured batch forever."""

        def __init__(self, batch):
            self._batch = batch

        def __iter__(self):
            fixed = self._batch
            while True:
                yield fixed

    return DummyIter(data_batch)
|
def libvlc_video_get_chapter_description(p_mi, i_title):
    '''Get the description of available chapters for specific title.
    @param p_mi: the media player.
    @param i_title: selected title.
    @return: list containing description of available chapter for title i_title.
    '''
    # Reuse the cached ctypes wrapper when present; build it otherwise.
    name = 'libvlc_video_get_chapter_description'
    wrapper = _Cfunctions.get(name, None) or _Cfunction(
        name, ((1,), (1,),), None,
        ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
    return wrapper(p_mi, i_title)
|
def _to_json_type ( obj , classkey = None ) :
"""Recursively convert the object instance into a valid JSON type ."""
|
if isinstance ( obj , dict ) :
data = { }
for ( k , v ) in obj . items ( ) :
data [ k ] = _to_json_type ( v , classkey )
return data
elif hasattr ( obj , "_ast" ) :
return _to_json_type ( obj . _ast ( ) )
elif hasattr ( obj , "__iter__" ) :
return [ _to_json_type ( v , classkey ) for v in obj ]
elif hasattr ( obj , "__dict__" ) :
data = dict ( [ ( key , _to_json_type ( value , classkey ) ) for key , value in obj . __dict__ . iteritems ( ) if not callable ( value ) and not key . startswith ( '_' ) ] )
if classkey is not None and hasattr ( obj , "__class__" ) :
data [ classkey ] = obj . __class__ . __name__
return data
else :
return obj
|
def get_parameter_value_from_file_names(files, parameters=None, unique=False, sort=True):
    """Takes a list of files, searches for the parameter name in the file name and returns an ordered dict with the file name
    in the first dimension and the corresponding parameter value in the second.

    The file names can be sorted by the parameter value, otherwise the order is kept. If unique is true every parameter is unique and
    mapped to the file name that occurred last in the files list.

    NOTE: this function uses Python-2-only constructs (``basestring``, the
    ``reduce`` builtin, ``dict.iteritems``) — it will not run unmodified on
    Python 3.

    Parameters
    ----------
    files : list of strings
    parameters : string or list of strings
    unique : bool
    sort : bool

    Returns
    -------
    collections.OrderedDict
    """
    # unique = False
    logging . debug ( 'Get the parameter: ' + str ( parameters ) + ' values from the file names of ' + str ( len ( files ) ) + ' files' )
    files_dict = collections . OrderedDict ( )
    if parameters is None : # special case , no parameter defined
        return files_dict
    if isinstance ( parameters , basestring ) :
        parameters = ( parameters , )
    # Build a regex like "<p1>_<p2>_(-?\d+)_(-?\d+)": the joined parameter
    # names followed by one signed-integer capture group per parameter.
    search_string = '_' . join ( parameters )
    for _ in parameters :
        search_string += r'_(-?\d+)'
    result = { }
    for one_file in files :
        parameter_values = re . findall ( search_string , one_file )
        if parameter_values :
            # With multiple capture groups findall returns tuples; flatten them.
            if isinstance ( parameter_values [ 0 ] , tuple ) :
                parameter_values = list ( reduce ( lambda t1 , t2 : t1 + t2 , parameter_values ) )
            parameter_values = [ [ int ( i ) , ] for i in parameter_values ]
            # convert string value to list with int
            files_dict [ one_file ] = dict ( zip ( parameters , parameter_values ) )
            if unique : # reduce to the files with different scan parameters
                for key , value in files_dict . items ( ) :
                    if value not in result . values ( ) :
                        result [ key ] = value
            else :
                result [ one_file ] = files_dict [ one_file ]
    # NOTE(review): when sort is False the un-deduplicated files_dict is
    # returned even if unique is True — confirm whether that is intended.
    return collections . OrderedDict ( sorted ( result . iteritems ( ) , key = itemgetter ( 1 ) ) if sort else files_dict )
|
def _getMethodNamePrefix ( self , node ) :
"""Return the prefix of this method based on sibling methods .
@ param node : the current node"""
|
targetName = node . name
for sibling in node . parent . nodes_of_class ( type ( node ) ) :
if sibling is node : # We are on the same node in parent so we skip it .
continue
prefix = self . _getCommonStart ( targetName , sibling . name )
if not prefix . rstrip ( '_' ) : # We ignore prefixes which are just underscores .
continue
return prefix
return ''
|
def update(self, actions=values.unset):
    """Update the TaskActionsInstance.

    :param dict actions: The JSON string that specifies the actions that
        instruct the Assistant on how to perform the task

    :returns: Updated TaskActionsInstance
    :rtype: twilio.rest.autopilot.v1.assistant.task.task_actions.TaskActionsInstance
    """
    # Delegate the actual API call to the context proxy.
    return self._proxy.update(actions=actions)
|
def create(model, count, *args, **kwargs):
    '''Create *count* instances of *model*, using a :ref:`registered
    <registry>` autofixture when one exists and falling back to the default
    :class:`AutoFixture` otherwise. *model* can be a model class or its
    string representation (e.g. ``"app.ModelClass"``).

    All positional and keyword arguments are passed to the autofixture
    constructor, except those accepted by ``create_one`` which are forwarded
    to the ``create`` call instead. For example, this creates ten
    superusers::

        import autofixture
        admins = autofixture.create('auth.User', 10, field_values={'is_superuser': True})

    .. note:: See :ref:`AutoFixture` for more information.

    Returns the list of created objects.
    '''
    from .compat import get_model
    if isinstance(model, string_types):
        app_label, model_name = model.split('.', 1)
        model = get_model(app_label, model_name)
    fixture_class = REGISTRY[model] if model in REGISTRY else AutoFixture
    # Keyword arguments understood by create_one() are routed to create();
    # everything else configures the fixture itself.
    accepted = set(getargnames(fixture_class.create_one)) - set(['self'])
    create_kwargs = {}
    for key in list(kwargs):
        if key in accepted:
            create_kwargs[key] = kwargs.pop(key)
    fixture = fixture_class(model, *args, **kwargs)
    return fixture.create(count, **create_kwargs)
|
def remove(self, name):
    '''Remove a column of data.

    Args:
        name (str): name of the column to remove

    Returns:
        None

    .. note::
        A missing column is tolerated: a warning is issued instead of
        raising an error.
    '''
    import warnings
    try:
        del self.data[name]
    except (ValueError, KeyError):
        warnings.warn("Unable to find column '%s' in data source" % name)
|
def get_property(self, name):
    """Gets the given property of the element.

    :Args:
        - name - Name of the property to retrieve.

    :Usage:
        text_length = target_element.get_property("text_length")
    """
    payload = {"name": name}
    try:
        return self._execute(Command.GET_ELEMENT_PROPERTY, payload)["value"]
    except WebDriverException:
        # The remote end predates getElementProperty: emulate it with JS.
        return self.parent.execute_script('return arguments[0][arguments[1]]', self, name)
|
def _sensoryComputeLearningMode(self, anchorInput):
    """Associate this location with a sensory input. Subsequently, anchorInput will
    activate the current location during anchor().

    @param anchorInput (numpy array)
    A sensory input. This will often come from a feature-location pair layer.
    """
    # Overlap restricted to connected synapses; segments clearing the
    # activation threshold are considered active.
    overlaps = self.connections.computeActivity(anchorInput, self.connectedPermanence)
    activeSegments = np.where(overlaps >= self.activationThreshold)[0]
    # Overlap over all (potential) synapses; used to find matching segments.
    potentialOverlaps = self.connections.computeActivity(anchorInput)
    matchingSegments = np.where(potentialOverlaps >= self.learningThreshold)[0]
    # Cells with a active segment: reinforce the segment
    cellsForActiveSegments = self.connections.mapSegmentsToCells(activeSegments)
    learningActiveSegments = activeSegments[np.in1d(cellsForActiveSegments, self.activeCells)]
    remainingCells = np.setdiff1d(self.activeCells, cellsForActiveSegments)
    # Remaining cells with a matching segment: reinforce the best
    # matching segment.
    candidateSegments = self.connections.filterSegmentsByCell(matchingSegments, remainingCells)
    cellsForCandidateSegments = (self.connections.mapSegmentsToCells(candidateSegments))
    candidateSegments = candidateSegments[np.in1d(cellsForCandidateSegments, remainingCells)]
    # Presumably argmaxMulti keeps, per cell, the candidate segment with the
    # highest potential overlap — confirm against the np2 helper's docs.
    onePerCellFilter = np2.argmaxMulti(potentialOverlaps[candidateSegments], cellsForCandidateSegments)
    learningMatchingSegments = candidateSegments[onePerCellFilter]
    newSegmentCells = np.setdiff1d(remainingCells, cellsForCandidateSegments)
    # Learn on both the reinforced-active and best-matching segment sets.
    for learningSegments in (learningActiveSegments, learningMatchingSegments):
        self._learn(self.connections, self.rng, learningSegments, anchorInput, potentialOverlaps, self.initialPermanence, self.sampleSize, self.permanenceIncrement, self.permanenceDecrement, self.maxSynapsesPerSegment)
    # Remaining cells without a matching segment: grow one.
    # New-synapse count is capped by sampleSize and maxSynapsesPerSegment
    # (-1 means "no limit" for either setting).
    numNewSynapses = len(anchorInput)
    if self.sampleSize != -1:
        numNewSynapses = min(numNewSynapses, self.sampleSize)
    if self.maxSynapsesPerSegment != -1:
        numNewSynapses = min(numNewSynapses, self.maxSynapsesPerSegment)
    newSegments = self.connections.createSegments(newSegmentCells)
    self.connections.growSynapsesToSample(newSegments, anchorInput, numNewSynapses, self.initialPermanence, self.rng)
    self.activeSegments = activeSegments
    self.sensoryAssociatedCells = self.activeCells
|
def has_bom(self, f):
    """Check for UTF8, UTF16, and UTF32 BOMs."""
    # The longest BOM is 4 bytes, so that is all we need to inspect.
    content = f.read(4)
    m = RE_UTF_BOM.match(content)
    if m is None:
        return None
    # Map regex group index -> encoding implied by the matched BOM
    # (groups 2/3 are the UTF-32 variants, 4/5 the UTF-16 ones).
    group_to_encoding = (
        (1, 'utf-8-sig'),
        (2, 'utf-32'),
        (3, 'utf-32'),
        (4, 'utf-16'),
        (5, 'utf-16'),
    )
    for index, encoding in group_to_encoding:
        if m.group(index):
            return encoding
    return None
|
def _split_rules ( rules ) :
'''Split rules with combined grants into individual rules .
Amazon returns a set of rules with the same protocol , from and to ports
together as a single rule with a set of grants . Authorizing and revoking
rules , however , is done as a split set of rules . This function splits the
rules up .'''
|
split = [ ]
for rule in rules :
ip_protocol = rule . get ( 'ip_protocol' )
to_port = rule . get ( 'to_port' )
from_port = rule . get ( 'from_port' )
grants = rule . get ( 'grants' )
for grant in grants :
_rule = { 'ip_protocol' : ip_protocol , 'to_port' : to_port , 'from_port' : from_port }
for key , val in six . iteritems ( grant ) :
_rule [ key ] = val
split . append ( _rule )
return split
|
def get_parameters(tp):
    """Return type parameters of a parameterizable type as a tuple
    in lexicographic order. Parameterizable types are generic types,
    unions, tuple types and callable types. Examples::

        get_parameters(int) == ()
        get_parameters(Generic) == ()
        get_parameters(Union) == ()
        get_parameters(List[int]) == ()
        get_parameters(Generic[T]) == (T,)
        get_parameters(Tuple[List[T], List[S_co]]) == (T, S_co)
        get_parameters(Union[S_co, Tuple[T, T]][int, U]) == (U,)
        get_parameters(Mapping[T, Tuple[S_co, T]]) == (T, S_co)
    """
    if NEW_TYPING:
        # Presumably NEW_TYPING flags the 3.7+ typing internals, where
        # generic aliases and Generic subclasses expose __parameters__
        # directly — confirm against this module's version detection.
        if (isinstance(tp, _GenericAlias) or isinstance(tp, type) and issubclass(tp, Generic) and tp is not Generic):
            return tp.__parameters__
        return ()
    # Older typing: only generic/union/callable/tuple types carry parameters.
    if (is_generic_type(tp) or is_union_type(tp) or is_callable_type(tp) or is_tuple_type(tp)):
        return tp.__parameters__ if tp.__parameters__ is not None else ()
    return ()
|
def _get_array ( val , shape , default = None , dtype = np . float64 ) :
"""Ensure an object is an array with the specified shape ."""
|
assert val is not None or default is not None
if hasattr ( val , '__len__' ) and len ( val ) == 0 : # pragma : no cover
val = None
# Do nothing if the array is already correct .
if ( isinstance ( val , np . ndarray ) and val . shape == shape and val . dtype == dtype ) :
return val
out = np . zeros ( shape , dtype = dtype )
# This solves ` ValueError : could not broadcast input array from shape ( n )
# into shape ( n , 1 ) ` .
if val is not None and isinstance ( val , np . ndarray ) :
if val . size == out . size :
val = val . reshape ( out . shape )
out . flat [ : ] = val if val is not None else default
assert out . shape == shape
return out
|
def make_symbolic(self, name, addr, length=None):
    """Replaces `length` bytes starting at `addr` with a symbolic variable named name. Adds a constraint equaling that
    symbolic variable to the value previously at `addr`, and returns the variable.
    """
    l.debug("making %s bytes symbolic", length)
    if isinstance(addr, str):
        # A register name was given: resolve it to (offset, size).
        addr, length = self.state.arch.registers[addr]
    elif length is None:
        raise Exception("Unspecified length!")
    old_value = self.load(addr, length)
    sym = self.get_unconstrained_bytes(name, old_value.size())
    # Overwrite with the fresh symbol, then pin it to the old contents.
    self.store(addr, sym)
    self.state.add_constraints(old_value == sym)
    l.debug("... eq constraints: %s", old_value == sym)
    return sym
|
def get_directory_list_doc(self, configs):
    """JSON dict description of a protorpc.remote.Service in list format.

    Args:
      configs: Either a single dict or a list of dicts containing the service
        configurations to list.

    Returns:
      dict, The directory list document as a JSON dict.
    """
    # Promote a bare dict to a one-element list before validation.
    if not isinstance(configs, (list, tuple)):
        configs = [configs]
    util.check_list_type(configs, dict, 'configs', allow_none=False)
    return self.__directory_list_descriptor(configs)
|
def rectangles_from_histogram(H):
    """Largest Rectangular Area in a Histogram

    :param H: histogram table
    :returns: area, left, height, right, rect. is [0, height] * [left, right)
    :complexity: linear
    """
    best = (float('-inf'), 0, 0, 0)
    stack = []  # pairs (start index, height) with strictly increasing heights
    # Appended sentinel forces every bar to be closed by the end.
    padded = H + [float('-inf')]
    for right, x in enumerate(padded):
        left = right
        # Close every open bar at least as tall as the current one.
        while stack and stack[-1][1] >= x:
            left, height = stack.pop()
            # Area is the first tuple element, so tuple comparison
            # selects the rectangle with the largest area.
            candidate = (height * (right - left), left, height, right)
            if candidate > best:
                best = candidate
        stack.append((left, x))
    return best
|
def remapOpenCv(im, coords):
    """Remap an image using OpenCV. See :func:`remap` for parameters."""
    # Older OpenCV versions require a C-contiguous input array.
    contiguous = np.require(im, im.dtype, 'C')
    return cv2.remap(contiguous, coords, None, cv2.INTER_LANCZOS4)
|
def get_user_properties(self):
    """Return the properties of the User"""
    user = self.context.getUser()
    if user is None:
        # No User linked, nothing to do
        return {}
    plone_user = user.getUser()
    userid = plone_user.getId()
    properties = {}
    # Flatten every property sheet into a single mapping; later sheets
    # win on key collisions, mirroring listPropertysheets() order.
    for sheet_name in plone_user.listPropertysheets():
        sheet = plone_user.getPropertysheet(sheet_name)
        properties.update(dict(sheet.propertyItems()))
    portal = api.get_portal()
    mtool = getToolByName(self.context, 'portal_membership')
    properties["id"] = userid
    properties["portrait"] = mtool.getPersonalPortrait(id=userid)
    properties["edit_url"] = "{}/@@user-information?userid={}".format(
        portal.absolute_url(), userid)
    return properties
|
def contains_as_type(self, value_type, value):
    """Checks if this array contains a value.
    The check before comparison converts elements and the value to type specified by type code.

    :param value_type: a type code that defines a type to convert values before comparison
    :param value: a value to be checked
    :return: true if this array contains the value or false otherwise.
    """
    typed_value = TypeConverter.to_nullable_type(value_type, value)
    for element in self:
        typed_element = TypeConverter.to_type(value_type, element)
        # Fixed: compare against None with `is`, not `==` (PEP 8 E711);
        # `==` can be defeated by objects overriding __eq__.
        if typed_value is None and typed_element is None:
            # Both converted to None: treat as a match.
            return True
        if typed_value is None or typed_element is None:
            # Only one side is None: cannot match, keep scanning.
            continue
        if typed_value == typed_element:
            return True
    return False
|
def req_withdraw(self, address, amount, currency, fee=0, addr_tag=""):
    """Request a withdrawal of cryptocurrency.

    :param address: destination address for the withdrawal
    :param amount: amount to withdraw
    :param currency: btc, ltc, bcc, eth, etc... (currencies supported by Huobi Pro)
    :param fee: withdrawal fee
    :param addr-tag: address tag (used by currencies such as XRP)
    :return: {
      "status": "ok",
      "data": 700
    """
    params = {'address': address, 'amount': amount, 'currency': currency,
              'fee': fee, 'addr-tag': addr_tag}
    path = '/v1/dw/withdraw/api/create'

    # NOTE(review): this does not perform the request itself — it returns a
    # decorator; the decorated function receives the signed POST result when
    # the returned handle() is invoked. Presumably intended for deferred
    # execution by the caller — confirm against call sites.
    def _wrapper(_func):
        @wraps(_func)
        def handle():
            # api_key_post signs `params` and issues the POST to `path`.
            _func(api_key_post(params, path))
        return handle
    return _wrapper
|
def get_true_capacity(self):
    """Get the capacity for the scheduled activity, taking into account activity defaults and
    overrides."""
    # A capacity set directly on the scheduled activity wins outright.
    override = self.capacity
    if override is not None:
        return override
    if self.rooms.count() == 0 and self.activity.default_capacity:
        # use activity-level override
        return self.activity.default_capacity
    # Otherwise the capacity is the sum over the effective rooms.
    return EighthRoom.total_capacity_of_rooms(self.get_true_rooms())
|
def click_text(self, text, exact_match=False):
    """Click text identified by ``text``.

    By default tries to click first text involves given ``text``, if you would
    like to click exactly matching text, then set ``exact_match`` to `True`.
    If there are multiple use of ``text`` and you do not want first one,
    use `locator` with `Get Web Elements` instead.
    """
    element = self._element_find_by_text(text, exact_match)
    element.click()
|
def rmdir_p(self):
    """Like :meth:`rmdir`, but does not raise an exception if the
    directory is not empty or does not exist."""
    # Swallow only the "missing", "exists", and "not empty" failures;
    # translate() maps OS-level errors into DirectoryNotEmpty first.
    with contextlib.suppress(FileNotFoundError, FileExistsError, DirectoryNotEmpty):
        with DirectoryNotEmpty.translate():
            self.rmdir()
    return self
|
def delete_lambda_deprecated(awsclient, function_name, s3_event_sources=None,
                             time_event_sources=None, delete_logs=False):
    """Deprecated: please use delete_lambda!

    :param awsclient:
    :param function_name:
    :param s3_event_sources:
    :param time_event_sources:
    :param delete_logs:
    :return: exit_code
    """
    # Fixed the FIXME: mutable default arguments (`[]`) are shared across
    # calls; use None sentinels and build a fresh list per invocation.
    # Callers passing lists (or nothing) see identical behavior.
    s3_event_sources = [] if s3_event_sources is None else s3_event_sources
    time_event_sources = [] if time_event_sources is None else time_event_sources
    # TODO remove event source first and maybe also needed for permissions
    unwire_deprecated(awsclient, function_name,
                      s3_event_sources=s3_event_sources,
                      time_event_sources=time_event_sources,
                      alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.delete_function(FunctionName=function_name)
    if delete_logs:
        log_group_name = '/aws/lambda/%s' % function_name
        delete_log_group(awsclient, log_group_name)
    log.info(json2table(response))
    return 0
|
def is_subclass(o, bases):
    """Similar to the ``issubclass`` builtin, but does not raise a ``TypeError``
    if either ``o`` or ``bases`` is not an instance of ``type``.

    Example::

        >>> is_subclass(IOError, Exception)
        True
        >>> is_subclass(Exception, None)
        False
        >>> is_subclass(None, Exception)
        False
        >>> is_subclass(IOError, (None, Exception))
        True
        >>> is_subclass(Exception, (None, 42))
        False
    """
    # Happy path: the real check works when both arguments are valid.
    try:
        return _issubclass(o, bases)
    except TypeError:
        pass
    # A non-type first argument can never be a subclass of anything.
    if not isinstance(o, type):
        return False
    # A non-tuple, non-type `bases` (e.g. None) simply means "no match".
    if not isinstance(bases, tuple):
        return False
    # Retry with the non-type entries filtered out of the tuple.
    filtered = tuple(b for b in bases if isinstance(b, type))
    return _issubclass(o, filtered)
|
def prune(args):
    """%prog prune best.edges

    Prune overlap graph.
    """
    from collections import defaultdict

    p = OptionParser(prune.__doc__)
    add_graph_options(p)
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    bestedges, = args
    G = read_graph(bestedges, maxerr=opts.maxerr)
    reads_to_ctgs = parse_ctgs(bestedges, opts.frgctg)

    # Classify each read-pair edge by the unitigs its endpoints map to and
    # accumulate inter-unitig edge weights.
    edges = defaultdict(int)
    r = defaultdict(int)
    for a, b, d in G.edges_iter(data=True):
        ua, ub = reads_to_ctgs.get(a), reads_to_ctgs.get(b)
        nn = (ua, ub).count(None)
        if nn == 0:
            if ua == ub:
                r["Same tigs"] += 1
            else:
                r["Diff tigs"] += 1
                # Canonical ordering so (A, B) and (B, A) merge.
                if ua > ub:
                    ua, ub = ub, ua
                edges[(ua, ub)] += 1
        elif nn == 1:
            r["One null"] += 1
        else:
            assert nn == 2
            r["Two nulls"] += 1

    U = nx.Graph()
    difftigs = "diff_tigs.txt"
    neighbors = defaultdict(list)
    fw = open(difftigs, "w")
    for (ua, ub), count in edges.items():
        print("\t".join((ua, ub, str(count))), file=fw)
        U.add_edge(ua, ub, weight=count)
        neighbors[ua].append((ub, count))
        neighbors[ub].append((ua, count))
    fw.close()

    print("[Unitig edge property]", file=sys.stderr)
    for k, v in r.items():
        print(": ".join((k, str(v))), file=sys.stderr)
    print("Total: {0}".format(sum(r.values())), file=sys.stderr)

    print("[Unitig degree distribution]", file=sys.stderr)
    degrees = U.degree()
    degree_counter = Counter(degrees.values())
    for degree, count in sorted(degree_counter.items()):
        print("{0}\t{1}".format(degree, count), file=sys.stderr)

    # To find associative contigs, one look for a contig that is connected and
    # only connected to another single contig - and do that recursively until no
    # more contigs can be found
    associative = {}
    for ua, ubs in neighbors.items():
        if len(ubs) == 1:  # Only one neighbor
            ub, count = ubs[0]
            if count >= 2:  # Bubble
                associative[ua] = (ub, count)
    print("A total of {0} associative contigs found".format(len(associative)),
          file=sys.stderr)

    # Keep only one for mutual associative.
    # Fixed: values of `associative` are (partner, count) tuples, so the
    # partner must be unpacked before the membership test — the old code
    # compared the whole tuple against the keys and never matched. Iterate
    # over a snapshot so deleting entries is safe under Python 3.
    for ua, (ub, _) in list(associative.items()):
        if ub in associative and ua < ub:
            print(ua, "mutually associative with", ub, file=sys.stderr)
            del associative[ub]
    print("A total of {0} associative contigs retained".format(len(associative)),
          file=sys.stderr)

    assids = "associative.ids"
    fw = open(assids, "w")
    for ua, (ub, count) in sorted(associative.items(), key=lambda x: (x[1], x[0])):
        print("\t".join((ua, ub, str(count))), file=fw)
    fw.close()
    logging.debug("Associative contigs written to `{0}`".format(assids))
|
def validate(self):
    """Applies all defined validation to the current
    state of the object, and raises an error if
    they are not all met.

    Raises:
        ValidationError: if validations do not pass
    """
    missing = self.missing_property_names()
    if missing:
        raise validators.ValidationError(
            "'{0}' are required attributes for {1}".format(
                missing, self.__class__.__name__))
    for prop, val in six.iteritems(self._properties):
        if val is None:
            continue
        is_literal = getattr(val, 'isLiteralClass', None) is True
        if isinstance(val, ProtocolBase) or is_literal:
            # Nested protocol objects and literal wrappers self-validate.
            val.validate()
        elif isinstance(val, list):
            for item in val:
                item.validate()
        else:
            # This object is of the wrong type, but just try setting it
            # The property setter will enforce its correctness
            # and handily coerce its type at the same time
            setattr(self, prop, val)
    return True
|
async def get_alarms(self):
    """Get alarms for a Netdata instance."""
    url = '{}{}'.format(self.base_url, self.endpoint)
    try:
        # Cap the entire request/read cycle at 5 seconds (older
        # async_timeout API that still accepts a ``loop`` argument).
        with async_timeout.timeout(5, loop=self._loop):
            response = await self._session.get(url)
            _LOGGER.debug("Response from Netdata: %s", response.status)
            data = await response.text()
            _LOGGER.debug(data)
            # Raw response body is stored as-is; parsing is the caller's job.
            self.alarms = data
    except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror):
        # Network failure, DNS failure, or timeout all surface as one
        # library-specific error.
        _LOGGER.error("Can not load data from Netdata")
        raise exceptions.NetdataConnectionError()
|
def status_mute(self, id):
    """Mute notifications for a status.

    Returns a `toot dict`_ with the now muted status
    """
    status_id = self.__unpack_id(id)
    endpoint = '/api/v1/statuses/{0}/mute'.format(str(status_id))
    return self.__api_request('POST', endpoint)
|
def _generic_signal_handler(self, signal_type):
    """Function for handling both SIGTERM and SIGINT"""
    # Close the open HTML <pre> tag so captured output stays well-formed.
    print("</pre>")
    self.timestamp("Got " + signal_type + ". Failing gracefully...")
    # Record the failure and attempt dynamic recovery before exiting.
    self.fail_pipeline(KeyboardInterrupt(signal_type), dynamic_recover=True)
    sys.exit(1)
|
def auth_complete(self):
    """Whether the authentication handshake is complete during
    connection initialization.

    :rtype: bool
    """
    # Without a CBS session there is no token handshake to wait on.
    if not self._connection.cbs:
        return True
    timeout, auth_in_progress = self._auth.handle_token()
    if timeout is None and auth_in_progress is None:
        _logger.debug("No work done.")
        return False
    if timeout:
        raise compat.TimeoutException("Authorization timeout.")
    if auth_in_progress:
        # Pump the connection so the handshake can make progress.
        self._connection.work()
        return False
    return True
|
def footer_length(header):
    """Calculates the ciphertext message footer length, given a complete header.

    :param header: Complete message header object
    :type header: aws_encryption_sdk.structures.MessageHeader
    :rtype: int
    """
    algorithm = header.algorithm
    # Unsigned algorithm suites produce no footer at all.
    if algorithm.signing_algorithm_info is None:
        return 0
    # 2-byte signature-length field followed by the signature itself.
    return 2 + algorithm.signature_len
|
async def create_task(app: web.Application, coro: Coroutine, *args, **kwargs) -> asyncio.Task:
    """Convenience function for calling `TaskScheduler.create(coro)`

    This will use the default `TaskScheduler` to create a new background task.

    Example:

        import asyncio
        from datetime import datetime
        from brewblox_service import scheduler, service

        async def current_time(interval):
            while True:
                await asyncio.sleep(interval)
                print(datetime.now())

        async def start(app):
            await scheduler.create_task(app, current_time(interval=2))

        app = service.create_app(default_name='example')
        scheduler.setup(app)
        app.on_startup.append(start)
        service.furnish(app)
        service.run(app)
    """
    # Delegate to the app's registered scheduler.
    task_scheduler = get_scheduler(app)
    return await task_scheduler.create(coro, *args, **kwargs)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.