signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def quantileclip(arrays, masks=None, dtype=None, out=None, zeros=None,
                 scales=None, weights=None, fclip=0.10):
    """Combine arrays using quantile clipping, with optional masks.

    All input arrays share one shape; masks, if present, match it.
    The result has one more dimension than the inputs, sized
    (3, shape): out[0] holds the mean, out[1] the variance of the
    mean and out[2] the number of points used.

    :param arrays: a list of arrays
    :param masks: a list of mask arrays, True values are masked
    :param dtype: data type of the output
    :param out: optional output, with one more axis than the input arrays
    :param fclip: fraction of points removed on both ends; maximum is
        0.4 (80% of points rejected)
    :return: mean, variance of the mean and number of points stored
    """
    method = intl_combine.quantileclip_method(fclip)
    return generic_combine(method, arrays, masks=masks, dtype=dtype,
                           out=out, zeros=zeros, scales=scales,
                           weights=weights)
def suspend(self):
    """Suspend the GNS3 VM.

    :raise GNS3VMError: when no VMX path has been configured.
    """
    if self._vmx_path is None:
        raise GNS3VMError("No VMX path configured, can't suspend the VM")
    try:
        yield from self._execute("suspend", [self._vmx_path])
    except GNS3VMError as e:
        # Best effort: a failed suspend is only logged, not fatal.
        log.warning("Error when suspending the VM: {}".format(str(e)))
    log.info("GNS3 VM has been suspended")
    self.running = False
def get_proxy(self, input_):
    """Gets a proxy.

    :param input_: a proxy condition (``osid.proxy.ProxyCondition``)
    :return: a proxy (``osid.proxy.Proxy``)
    :raise: ``NullArgument`` -- ``input_`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- ``input_`` is not of this service
    *compliance: mandatory -- This method must be implemented.*
    """
    # Pick the authentication mechanism from whichever credential the
    # condition carries: an HTTP request, an XBlock user, or neither.
    if input_._http_request is not None:
        authentication = DjangoAuthentication()
        authentication.set_django_user(input_._http_request.user,
                                       input_._use_user_id)
    elif input_._xblock_user is not None:
        authentication = XBlockAuthentication()
        authentication.set_xblock_user(input_._xblock_user)
    else:
        authentication = None

    if authentication is None:
        effective_agent_id = input_._effective_agent_id
    else:
        effective_agent_id = authentication.get_agent_id()

    # None when the condition carries no locale.
    locale = input_._locale
    return rules.Proxy(authentication=authentication,
                       effective_agent_id=effective_agent_id,
                       locale=locale)
def fetch(self, rebuild=False, cache=True):
    """Fetch the table, applying all post processors.

    Args:
        rebuild (bool): Rebuild the table and ignore cache. Default: False
        cache (bool): Cache the finished table for faster future loading.
            Default: True
    """
    if not rebuild:
        try:
            return self.read_cache()
        except FileNotFoundError:
            pass  # no cache yet -- fall through and build the table
    return self._process_table(cache)
def evaluate(self, s):
    r"""Evaluate :math:`B(s)` along the curve.

    This method acts as a (partial) inverse to :meth:`locate`; see
    :meth:`evaluate_multi` for more details.

    Args:
        s (float): Parameter along the curve.

    Returns:
        numpy.ndarray: The point on the curve (as a two dimensional
        NumPy array with a single column).
    """
    params = np.asfortranarray([s])
    return _curve_helpers.evaluate_multi(self._nodes, params)
def expand_indent(line):
    """Return the amount of indentation of ``line``.

    Tabs are expanded to the next multiple of 8.

    >>> expand_indent('    ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent('   \t')
    8
    >>> expand_indent('        \t')
    16
    """
    result = 0
    for char in line:
        if char == '\t':
            # Jump to the next tab stop.  Floor division is required:
            # true division ('/') yields a float on Python 3 and breaks
            # the multiple-of-8 arithmetic.
            result = result // 8 * 8 + 8
        elif char == ' ':
            result += 1
        else:
            break
    return result
def _write_attribute_categorical(series, fp):
    """Write the category set of a categorical/nominal attribute to *fp*.

    Categorical dtypes keep their declared category order; any other
    dtype is reduced to its unique non-null values, sorted ignoring
    surrounding double quotes.
    """
    if is_categorical_dtype(series.dtype):
        string_values = _check_str_array(series.cat.categories)
    else:
        uniques = series.dropna().unique()
        string_values = sorted(_check_str_array(uniques),
                               key=lambda value: value.strip('"'))
    fp.write("{")
    fp.write(",".join(string_values))
    fp.write("}")
def show_history(self, status=None, nids=None, full_history=False, metadata=False):
    """Print the history of the flow to stdout.

    Args:
        status: if not None, only the tasks with this status are selected.
        nids: optional list of node identifiers used to filter the tasks.
        full_history: print full info set, including nodes with an empty history.
        metadata: print history metadata (experimental).
    """
    nrows, ncols = get_terminal_size()

    def dump(node):
        # Banner followed by the node's history.
        cprint(make_banner(str(node), width=ncols, mark="="),
               **node.status.color_opts)
        print(node.history.to_string(metadata=metadata))

    seen_works = []
    for task in self.iflat_tasks(status=status, nids=nids):
        # Show each work's history only once, the first time one of its
        # tasks comes up.
        work = task.work
        if work not in seen_works:
            seen_works.append(work)
            if work.history or full_history:
                dump(work)
        if task.history or full_history:
            dump(task)

    # Finally the history of the flow itself.
    if self.history or full_history:
        dump(self)
def files(self, paths, access=None, extensions=None, minsize=None):
    """Verify a list of files; invalid paths are kept in ``self.failures``."""
    self.failures = [
        path for path in paths
        if not isvalid(path, access, extensions, 'file', minsize)
    ]
    return not self.failures
def _shutdown(self):
    """Gracefully shut down the consumer and exit."""
    if self._channel:
        _log.info("Halting %r consumer sessions", self._channel.consumer_tags)
    self._running = False
    if self._connection and self._connection.is_open:
        self._connection.close()
    # Restore the default handlers so a second signal is not swallowed.
    for signum in (signal.SIGTERM, signal.SIGINT):
        signal.signal(signum, signal.SIG_DFL)
def visit_ifexp(self, node, parent):
    """Visit an IfExp node by returning a fresh instance of it."""
    newnode = nodes.IfExp(node.lineno, node.col_offset, parent)
    # Children are visited with the new node as their parent.
    newnode.postinit(
        self.visit(node.test, newnode),
        self.visit(node.body, newnode),
        self.visit(node.orelse, newnode),
    )
    return newnode
def _get_item_class(self, url):
    """Return the model class matching a URL.

    Raises:
        NotImplementedError: for URLs of unsupported catalog types.
    """
    markers = (
        ('/layers/', Layer),
        ('/tables/', Table),
        ('/sets/', Set),
        # ('/documents/', Document),
    )
    for marker, cls in markers:
        if marker in url:
            return cls
    raise NotImplementedError(
        "No support for catalog results of type %s" % url)
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to an offset within the file-like object.

    Args:
        offset (int): offset to seek to.
        whence (Optional(int)): value that indicates whether offset is an
            absolute or relative position within the file.

    Raises:
        IOError: if the object is not open or the seek failed.
        OSError: if the seek failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    # For a yet unknown reason a Python file-like object on Windows
    # accepts invalid whence values; reject them explicitly so the
    # behavior is identical on all platforms.
    if whence not in (os.SEEK_SET, os.SEEK_CUR, os.SEEK_END):
        raise IOError('Unsupported whence.')
    self._file_object.seek(offset, whence)
def translate(self, vector):
    """Translate the `Atom` by ``vector``.

    Parameters
    ----------
    vector : 3D Vector (tuple, list, numpy.array)
        Vector used for translation.
    """
    # One conversion suffices: the original converted the input to a
    # numpy array twice.  (The docstring also advertised a nonexistent
    # ``inc_alt_states`` parameter, now removed.)
    self._vector += numpy.array(vector)
    return
def load_texture(renderer, file):
    """Load an image file directly into a render texture.

    Args:
        renderer: The renderer to make the texture.
        file: The image file to load.

    Returns:
        A new texture.
    """
    ptr = check_ptr_err(lib.IMG_LoadTexture(renderer._ptr, file))
    return Texture._from_ptr(ptr)
def setName(self, name):
    """Define a name for this expression, making debugging and exception
    messages clearer.

    Example::

        Word(nums).parseString("ABC")  # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
        Word(nums).setName("integer").parseString("ABC")  # -> Exception: Expected integer (at char 0), (line:1, col:1)
    """
    self.name = name
    self.errmsg = "Expected " + self.name
    # Keep an already-created exception object in sync with the message.
    if hasattr(self, "exception"):
        self.exception.msg = self.errmsg
    return self
def set_config_token_from_env(section, token, config):
    '''Set a config entry from its matching environment variable, if any.

    The environment variable checked is of the form SECTION_TOKEN, all
    upper case, with any dots replaced by underscores.

    Returns True if the environment variable exists and was used, or
    False otherwise.
    '''
    env_var_name = '{0}_{1}'.format(section.upper(),
                                    token.upper().replace('.', '_'))
    env_var = os.environ.get(env_var_name)
    if env_var is None:
        return False
    config.set(section, token, env_var)
    return True
def pairwise_iou(boxlist1, boxlist2):
    """Compute pairwise intersection-over-union between box collections.

    Args:
        boxlist1: Nx4 floatbox
        boxlist2: Mx4

    Returns:
        a tensor with shape [N, M] representing pairwise iou scores.
    """
    intersections = pairwise_intersection(boxlist1, boxlist2)
    unions = (tf.expand_dims(area(boxlist1), 1)
              + tf.expand_dims(area(boxlist2), 0)
              - intersections)
    # Avoid 0/0 for non-overlapping pairs: their IoU is exactly 0.
    return tf.where(
        tf.equal(intersections, 0.0),
        tf.zeros_like(intersections),
        tf.truediv(intersections, unions))
def update_work_as_completed(self, worker_id, work_id, other_values=None, error=None):
    """Update a work piece in the datastore as completed.

    Args:
        worker_id: ID of the worker which did the work
        work_id: ID of the work which was done
        other_values: dictionary with additional values which should be
            saved with the work piece
        error: if not None then an error occurred during computation of
            the work piece; the work is marked completed with error.

    Returns:
        whether the work was successfully updated
    """
    client = self._datastore_client
    try:
        with client.transaction() as transaction:
            work_key = client.key(KIND_WORK_TYPE, self._work_type_entity_id,
                                  KIND_WORK, work_id)
            work_entity = client.get(work_key, transaction=transaction)
            # Only the worker that claimed the piece may complete it.
            if work_entity['claimed_worker_id'] != worker_id:
                return False
            work_entity['is_completed'] = True
            if other_values:
                work_entity.update(other_values)
            if error:
                work_entity['error'] = text_type(error)
            transaction.put(work_entity)
    except Exception:
        # Any datastore failure (conflict, timeout, ...) is reported as
        # an unsuccessful update rather than propagated.
        return False
    return True
def sed(file_path, pattern, replace_str, g=0):
    """Python implementation of the bash sed command.

    :param file_path: (str) Full path to the file to be edited
    :param pattern: (str) Search pattern to replace, as a regex
    :param replace_str: (str) String to replace the pattern
    :param g: (int) Whether to globally replace (0) or replace 1
        instance (equivalent to the 'g' option in bash sed)
    :return: None
    :raises CommandError
    """
    log = logging.getLogger(mod_logger + '.sed')

    # Type checks on the args
    for arg_name, arg_value in (('file_path', file_path),
                                ('pattern', pattern),
                                ('replace_str', replace_str)):
        if not isinstance(arg_value, basestring):
            msg = '{n} argument must be a string'.format(n=arg_name)
            log.error(msg)
            raise CommandError(msg)

    # Ensure the file_path file exists
    if not os.path.isfile(file_path):
        msg = 'File not found: {f}'.format(f=file_path)
        log.error(msg)
        raise CommandError(msg)

    # Rewrite the file in place, replacing matching lines.
    log.info('Updating file: %s...', file_path)
    for line in fileinput.input(file_path, inplace=True):
        if not re.search(pattern, line):
            sys.stdout.write(line)
            continue
        log.info('Updating line: %s', line)
        new_line = re.sub(pattern, replace_str, line, count=g)
        log.info('Replacing with line: %s', new_line)
        sys.stdout.write(new_line)
def create(self, set):
    """Create a new Set and return it deserialized from the response."""
    target_url = self.client.get_url('SET', 'POST', 'create')
    response = self.client.request('POST', target_url, json=set._serialize())
    return set._deserialize(response.json(), self)
def generate_form_data(self, **kwargs):
    """Create a form dictionary with the key being the element name
    and the value being a list of form element objects."""
    # Add elements that are missing from the form.
    self.children = add_missing_children(self.contained_children, self.children)
    # Pass the (now complete) children on to the generator.
    kwargs['children'] = self.children
    return FormGenerator(**kwargs)
def process_query_conditionally(self, query):
    """Process a Query that is used within a lazy loader.

    (process_query_conditionally() is a SQLAlchemy hook invoked only
    within lazyload.)
    """
    if not query._current_path:
        return
    mapper, prop = query._current_path[-2:]
    # Walk the MRO so an option registered against a base class also
    # applies to its subclasses.
    for cls in mapper.class_.__mro__:
        relationship_option = self._relationship_options.get((cls, prop.key))
        if relationship_option:
            query._cache = relationship_option
            break
def _start_install(self):
    """Start the installation by following the 'Start Installation' link."""
    self._check_title(self.browser.title())
    continue_link = next(self.browser.links(text_regex='Start Installation'))
    self.browser.follow_link(continue_link)
def _pdist(p):
    """Return a PL or ECPL instance based on the parameter vector *p*."""
    index, ref, ampl, cutoff, beta = p[:5]
    amplitude = ampl * 1e30 * u.Unit("1/eV")
    reference = ref * u.TeV
    if cutoff == 0.0:
        # No cutoff requested: plain power law.
        return models.PowerLaw(amplitude, reference, index)
    return models.ExponentialCutoffPowerLaw(
        amplitude, reference, index, cutoff * u.TeV, beta=beta,
    )
def score(self, text):
    """Score a sample of text.

    :param text: sample text to score
    :type text: str
    :return: dict of scores per category
    :rtype: dict
    """
    occurrences = self.count_token_occurrences(self.tokenizer(text))
    scores = {category: 0
              for category in self.categories.get_categories().keys()}
    categories = self.categories.get_categories().items()
    for word, count in occurrences.items():
        # Per-category counts for this token.
        token_scores = {
            category: float(bayes_category.get_token_count(word))
            for category, bayes_category in categories
        }
        # Used to derive token-in-category probabilities.
        token_tally = sum(token_scores.values())
        # A token found nowhere has probability 0 -- skip it.
        if token_tally == 0.0:
            continue
        # Bayes probability * the number of occurrences of this token
        # http://en.wikipedia.org/wiki/Naive_Bayes_spam_filtering
        for category, token_score in token_scores.items():
            scores[category] += count * self.calculate_bayesian_probability(
                category, token_score, token_tally)
    # Drop categories that gathered no score.
    return {category: score
            for category, score in scores.items() if score > 0}
def _produceIt(self, segments, thunk):
    """Underlying implementation of L{PrefixURLMixin.produceResource} and
    L{PrefixURLMixin.sessionlessProduceResource}.

    @param segments: the URL segments to dispatch.
    @param thunk: a 0-argument callable which returns an L{IResource}
        provider, or None.
    @return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
    """
    if self.prefixURL:
        needle = tuple(self.prefixURL.split('/'))
    else:
        needle = ()
    S = len(needle)
    if segments[:S] != needle:
        return None
    if segments == JUST_SLASH:  # I *HATE* THE WEB
        subsegments = segments
    else:
        subsegments = segments[S:]
    res = thunk()
    # Even though the URL matched up, sometimes we might still decide to
    # not handle this request (eg, some prerequisite for our function is
    # not met by the store).  Allow None to be returned by
    # createResource to indicate this case.
    if res is None:
        return None
    return res, subsegments
def cw_encode(index, value, value_format):
    """cw: Write a custom value.

    For value_format 2 the two bytes in ``value`` are combined into a
    single 16-bit integer, high byte first.
    """
    if value_format == 2:
        # Parenthesize the shift: '+' binds tighter than '<<', so the
        # original 'value[0] << 8 + value[1]' shifted by (8 + value[1])
        # instead of combining the two bytes.
        value = (value[0] << 8) + value[1]
    return MessageEncode('0Dcw{:02d}{:05d}00'.format(index + 1, value), None)
def assert_angles_allclose(x, y, **kwargs):
    """Like numpy's assert_allclose, but for angles (in radians).

    Angles are compared through their chord distance on the unit circle
    (with a = b = 1), so values differing by a multiple of 2*pi compare
    equal.
    """
    c2 = (np.sin(x) - np.sin(y)) ** 2 + (np.cos(x) - np.cos(y)) ** 2
    # Clip guards against floating-point error pushing the argument just
    # below -1 (nearly opposite angles), which would make arccos return
    # NaN instead of ~pi.
    diff = np.arccos(np.clip((2.0 - c2) / 2.0, -1.0, 1.0))
    assert np.allclose(diff, 0.0, **kwargs)
def get_true_argspec(method):
    """Drill through layers of decorators to locate the actual argspec
    for *method*.

    Returns the argspec of the innermost wrapped callable whose first
    positional argument is ``self``.

    Raises:
        DecoratorCompatibilityError: when no closure is available to
            inspect.
    """
    # inspect.getargspec was removed in Python 3.11; getfullargspec has
    # a compatible slot [0] (the positional argument names), so fall
    # back to it when getargspec is unavailable.
    _getargspec = getattr(inspect, 'getargspec', inspect.getfullargspec)
    argspec = _getargspec(method)
    args = argspec[0]
    if args and args[0] == 'self':
        return argspec
    if hasattr(method, '__func__'):
        method = method.__func__
    if not hasattr(method, '__closure__') or method.__closure__ is None:
        raise DecoratorCompatibilityError
    for cell in method.__closure__:
        inner_method = cell.cell_contents
        if inner_method is method:
            continue
        if not inspect.isfunction(inner_method) \
                and not inspect.ismethod(inner_method):
            continue
        true_argspec = get_true_argspec(inner_method)
        if true_argspec:
            return true_argspec
def to_dict(self, depth=-1, **kwargs):
    """Return a dict representation of the object, recursing one level
    less deeply into the header, html and script children."""
    out = super(Figure, self).to_dict(depth=depth, **kwargs)
    for key in ('header', 'html', 'script'):
        out[key] = getattr(self, key).to_dict(depth=depth - 1, **kwargs)
    return out
def compare(self, first, second, chamber, type='votes', congress=CURRENT_CONGRESS):
    """See how often two members voted together in a given Congress.

    Takes two member IDs, a chamber and a Congress number.
    """
    check_chamber(chamber)
    path = "members/{first}/{type}/{second}/{congress}/{chamber}.json".format(
        first=first, second=second, type=type, congress=congress,
        chamber=chamber)
    return self.fetch(path)
def write_pem(text, path, overwrite=True, pem_type=None):
    '''Write out a PEM string, fixing any formatting or whitespace issues
    before writing.

    text:
        PEM string input to be written out.
    path:
        Path of the file to write the pem out to.
    overwrite:
        If True (default), write_pem will overwrite the entire pem file.
        Set False to preserve existing private keys and dh params that
        may exist in the pem file.
    pem_type:
        The PEM type to be saved, for example ``CERTIFICATE`` or
        ``PUBLIC KEY``. Adding this will allow the function to take
        input that may contain multiple pem types.

    CLI Example:

    .. code-block:: bash

        salt '*' x509.write_pem "-----BEGIN CERTIFICATE-----MIIGMzCCBBugA..." path=/etc/pki/mycert.crt
    '''
    with salt.utils.files.set_umask(0o077):
        text = get_pem_entry(text, pem_type=pem_type)
        _dhparams = ''
        _private_key = ''
        preserve = (pem_type and pem_type == 'CERTIFICATE'
                    and os.path.isfile(path) and not overwrite)
        if preserve:
            # Pull any existing DH params and private key out of the
            # file so they can be written back around the new cert.
            _filecontents = _text_or_file(path)
            try:
                _dhparams = get_pem_entry(_filecontents, 'DH PARAMETERS')
            except salt.exceptions.SaltInvocationError as err:
                log.debug("Error when getting DH PARAMETERS: %s", err)
                log.trace(err, exc_info=err)
            try:
                _private_key = get_pem_entry(_filecontents,
                                             '(?:RSA )?PRIVATE KEY')
            except salt.exceptions.SaltInvocationError as err:
                log.debug("Error when getting PRIVATE KEY: %s", err)
                log.trace(err, exc_info=err)
        with salt.utils.files.fopen(path, 'w') as _fp:
            if pem_type and pem_type == 'CERTIFICATE' and _private_key:
                _fp.write(salt.utils.stringutils.to_str(_private_key))
            _fp.write(salt.utils.stringutils.to_str(text))
            if pem_type and pem_type == 'CERTIFICATE' and _dhparams:
                _fp.write(salt.utils.stringutils.to_str(_dhparams))
    return 'PEM written to {0}'.format(path)
def add_pattern(self, pattern, category=SourceRootCategories.UNKNOWN):
    """Add a pattern to the trie."""
    self._do_add_pattern(pattern, (), category)
def lookup_field_help(self, field, default=None):
    """Look up the help text for the passed in field.

    Resolution order: the ``field_config`` entry for the field, then the
    supplied ``default``, then the model field's ``help_text``.  Returns
    None when nothing matches.
    """
    # Renamed the local from 'help' so the builtin is not shadowed.
    help_text = None
    # is there a label specified for this field
    if field in self.field_config and 'help' in self.field_config[field]:
        help_text = self.field_config[field]['help']
    # if we were given a default, use that
    elif default:
        help_text = default
    # try to see if there is a description on our model
    elif hasattr(self, 'model'):
        for model_field in self.model._meta.fields:
            if model_field.name == field:
                help_text = model_field.help_text
                break
    return help_text
def add_properties(self, filename):
    """Add properties to config based on filename, replacing previous values.

    :param filename: str path to YAML file to pull top level properties from
    """
    filename = os.path.expanduser(filename)
    # Missing files are silently skipped: the properties are optional.
    if not os.path.exists(filename):
        return
    with open(filename, 'r') as yaml_file:
        self.update_properties(yaml.safe_load(yaml_file))
def rewrite_file_imports(item, vendored_libs):
    """Rewrite 'import xxx' and 'from xxx import' for vendored_libs."""
    text = item.read_text(encoding='utf-8')
    for lib in vendored_libs:
        # Bare 'import lib' -> vendored form, preserving surrounding
        # whitespace/newlines.
        text = re.sub(
            r'(\n\s*)import %s(\n\s*)' % lib,
            r'\1from pythonfinder._vendor import %s\2' % lib,
            text,
        )
        # 'from lib ...' -> 'from pythonfinder._vendor.lib ...'
        text = re.sub(
            r'(\n\s*)from %s' % lib,
            r'\1from pythonfinder._vendor.%s' % lib,
            text,
        )
    item.write_text(text, encoding='utf-8')
def DateStringToDateObject(date_string):
    """Return a date object for a string "YYYYMMDD", or None if invalid."""
    # If this becomes a bottleneck date objects could be cached.
    # Raw string avoids the invalid-escape-sequence warning; 'is None'
    # replaces the '== None' comparison.
    if re.match(r'^\d{8}$', date_string) is None:
        return None
    try:
        return datetime.date(int(date_string[0:4]),
                             int(date_string[4:6]),
                             int(date_string[6:8]))
    except ValueError:
        # e.g. month 13 or day 32.
        return None
def change_note_duration(self, at, to):
    """Change the note duration at the given index to the given duration."""
    if not valid_beat_duration(to):
        return
    shift = 0
    for entry in self.bar:
        # Entries after the changed note move back by the accumulated
        # start-time shift.
        if shift != 0:
            entry[0][0] -= shift
        if entry[0] == at:
            current = entry[0][1]
            entry[0][1] = to
            # NOTE(review): relies on true division; with Python 2 int
            # durations this would floor to 0 -- confirm durations are
            # floats here.
            shift = 1 / current - 1 / to
def _get_extra_price_id(items, key_name, hourly, location):
    """Return the price id attached to the item with the given key_name.

    :raises SoftLayer.SoftLayerError: when no matching price is found.
    """
    for item in items:
        if utils.lookup(item, 'keyName') != key_name:
            continue
        for price in item['prices']:
            # A price must match both the billing mode and the location.
            if _matches_billing(price, hourly) and _matches_location(price, location):
                return price['id']
    raise SoftLayer.SoftLayerError(
        "Could not find valid price for extra option, '%s'" % key_name)
def migrate(record, dry_run=False):
    '''Perform database migrations.'''
    handler = record_migration if record else execute_migration
    success = True
    for plugin, package, filename in available_migrations():
        migration = get_migration(plugin, filename)
        # NOTE(review): migrations already known (and everything after a
        # failure) are skipped -- confirm this short-circuit is intended.
        if migration or not success:
            log_status(plugin, filename, cyan('Skipped'))
            continue
        status = magenta('Recorded') if record else yellow('Apply')
        log_status(plugin, filename, status)
        script = resource_string(package, join('migrations', filename))
        success &= handler(plugin, filename, script, dryrun=dry_run)
def _get_broadcast_shape ( shape1 , shape2 ) :
"""Given two shapes that are not identical , find the shape
that both input shapes can broadcast to .""" | if shape1 == shape2 :
return shape1
length1 = len ( shape1 )
length2 = len ( shape2 )
if length1 > length2 :
shape = list ( shape1 )
else :
shape = list ( shape2 )
i = max ( length1 , length2 ) - 1
for a , b in zip ( shape1 [ : : - 1 ] , shape2 [ : : - 1 ] ) :
if a != 1 and b != 1 and a != b :
raise ValueError ( 'shape1=%s is not broadcastable to shape2=%s' % ( shape1 , shape2 ) )
shape [ i ] = max ( a , b )
i -= 1
return tuple ( shape ) |
def get_assessments_offered_by_banks(self, bank_ids):
    """Get the ``AssessmentOffered`` objects for a list of ``Banks``.

    arg:    bank_ids (osid.id.IdList): list of bank ``Ids``
    return: (osid.assessment.AssessmentOfferedList) - list of
            assessments offered
    raise:  NullArgument - ``bank_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure occurred
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bins
    offered = []
    for bank_id in bank_ids:
        offered.extend(self.get_assessments_offered_by_bank(bank_id))
    return objects.AssessmentOfferedList(offered)
def _handle_tag_schedule_return(self, tag, data):
    '''Handle a _schedule_return event.'''
    # Report current connection with the master.
    alive_prefix = master_event(type='alive', master='')
    if data['schedule'].startswith(alive_prefix):
        if data['return']:
            log.debug('Connected to master %s',
                      data['schedule'].split(alive_prefix)[1])
    self._return_pub(data, ret_cmd='_return', sync=False)
def __build_python_module_cache(self):
    """Recursively walk the b2/src subdirectories and build an index of
    base module name to fully qualified package name.

    The index is stored in self.__python_module_cache and allows for an
    O(1) module lookup.  For example, given the base module name
    `toolset`, self.__python_module_cache['toolset'] will return
    'b2.build.toolset'.

    pkgutil.walk_packages() will find any python package provided a
    directory contains an __init__.py.  This has the added benefit of
    allowing libraries to be installed and automatically available
    within the contrib directory.

    *Note*: pkgutil.walk_packages() will import any subpackage in order
    to access its __path__ variable.  Meaning: any initialization code
    will be run if the package hasn't already been imported.
    """
    cache = {}
    for importer, mname, ispkg in pkgutil.walk_packages(b2.__path__, prefix='b2.'):
        basename = mname.split('.')[-1]
        # Since the jam code only ever says "import toolset ;", two
        # modules sharing a basename (e.g. b2.build.toolset and
        # b2.contrib.toolset) would be ambiguous -- treat it as an error.
        if basename in cache:
            self.manager.errors()('duplicate module name "{0}" '
                                  'found in boost-build path'.format(basename))
        cache[basename] = mname
    self.__python_module_cache = cache
def potential_jumps(self):
    """All nearest-neighbour jumps not blocked by volume exclusion
    (i.e. from occupied to neighbouring unoccupied sites).

    Returns:
        (List(Jump)): List of possible jumps.
    """
    jumps = []

    def make_jump(occupied_site, vacant_site):
        return jump.Jump(occupied_site, vacant_site, self.nn_energy,
                         self.cn_energies, self.jump_lookup_table)

    # Iterate over whichever population is smaller: occupied sites or
    # vacancies.
    if self.number_of_occupied_sites <= self.number_of_sites / 2:
        for occupied_site in self.occupied_sites():
            neighbours = (self.site_with_id(n) for n in occupied_site.neighbours)
            for vacant_site in (s for s in neighbours if not s.is_occupied):
                jumps.append(make_jump(occupied_site, vacant_site))
    else:
        for vacant_site in self.vacant_sites():
            neighbours = (self.site_with_id(n) for n in vacant_site.neighbours)
            for occupied_site in (s for s in neighbours if s.is_occupied):
                jumps.append(make_jump(occupied_site, vacant_site))
    return jumps
def find_django_migrations_module(module_name):
    """Try to locate <module_name>.migrations_django (without keeping it
    imported).

    Appends either ".migrations_django" or ".migrations" to module_name.
    For details why:
    https://docs.djangoproject.com/en/1.7/topics/migrations/#libraries-third-party-apps
    """
    # The 'imp' module was removed in Python 3.12; importlib provides
    # the same probe.  Like imp.find_module + load_module, find_spec
    # imports the parent package in order to resolve its __path__.
    import importlib.util
    try:
        if importlib.util.find_spec(module_name + '.migrations_django') is not None:
            return module_name + '.migrations_django'
    except ImportError:
        # Parent module missing or not a package.
        pass
    return module_name + '.migrations'
def resolve_name(name, module=None):
    """Resolve a dotted name to a module and its parts.  This is stolen
    wholesale from unittest.TestLoader.loadTestByName."""
    parts = name.split('.')
    if module is None:
        remaining = parts[:]
        while remaining:  # pragma: no cover
            try:
                module = __import__('.'.join(remaining))
                break
            except ImportError:
                # Peel one component off the end and retry; re-raise
                # once nothing importable is left.
                del remaining[-1]
                if not remaining:
                    raise
    # The first component names the root module itself; walk the rest
    # as attributes.
    obj = module
    for part in parts[1:]:
        obj = getattr(obj, part)
    return obj
def user_login(self, email=None, password=None):
    """Login with email and password and keep the session cookie.

    :type email: str
    :param email: The email used for authentication
    :type password: str
    :param password: The password used for authentication
    """
    self._rpc_api = PiazzaRPC()
    self._rpc_api.user_login(email=email, password=password)
def wait_for_states(self, timeout=40, *states):
    """Wait until the port reaches one of the requested states.

    :param timeout: max time (seconds) to wait for requested port states.
    :raise TgnError: if none of the states is reached within the timeout.
    """
    state = self.get_attribute('state')
    for _ in range(timeout):
        if state in states:
            return
        time.sleep(1)
        state = self.get_attribute('state')
    raise TgnError('Failed to reach states {}, port state is {} after {} seconds'
                   .format(states, state, timeout))
def TPAGB_properties(self):
    """Temporary; use for now the same function in nugrid_set.py!

    Extracts thermal-pulse (TP) AGB parameters from the history data:
    TPstart, TPmods, TP_max_env, TPend, min_m_TP, max_m_TP, DUPmods,
    DUPm_min_h.

    :return: tuple (TPstart, TPmods, TP_max_env, TPend, min_m_TP,
        max_m_TP, DUPmods, DUPm_min_h) -- model numbers of pulse
        start/peak/end, envelope and PDCZ mass coordinates, and the
        third-dredge-up models with their minimum H-boundary masses.

    NOTE(review): ``t0_idx`` and ``h1_bdy`` are referenced below but never
    defined in this function (``h1_bdy``'s assignment is commented out);
    they look like copy-over bugs from nugrid_set.py -- confirm before use.
    """
    # Pulse peaks (by luminosity) and dredge-up minima found by helper.
    peak_lum_model, h1_mass_min_DUP_model = self.find_TP_attributes(3, t0_model=self.find_first_TP(), color='r', marker_type='o')
    print('first tp')
    print(self.find_first_TP())
    print('peak lum mmmodel')
    print(peak_lum_model)
    print(h1_mass_min_DUP_model)
    TPmods = peak_lum_model
    DUPmods = h1_mass_min_DUP_model
    DUPmods1 = []
    for k in range(len(DUPmods)):
        # Shift each DUP model 100 models forward, to exclude HBB? effects.
        DUPmods1.append(int(float(DUPmods[k])) + 100)
    DUPmods = DUPmods1
    TPstart = []
    # Find beginning of TP: go from TP peak backwards.
    # Find end of PDCZ by seeking from TP peak and checking mx2_bot.
    models = self.get('model_number')
    mx2b_array = self.get('conv_mx2_bot')  # bottom of 2nd convective zone (mass frac)
    mx2t_array = self.get('conv_mx2_top')  # top of 2nd convective zone (mass frac)
    massbot = mx2b_array
    masstop = mx2t_array
    # Envelope bottom in absolute mass units.
    massenv = np.array(self.get('conv_mx1_bot')) * np.array(self.get('star_mass'))
    # h1_bdy = self.get('h1_boundary_mass')
    for k in range(len(TPmods)):
        idx = list(models).index(TPmods[k])
        mx2b = mx2b_array[:idx]
        # Walk backwards from the peak until the PDCZ vanishes.
        for i in range(len(mx2b) - 1, 0, -1):
            if mx2b[i] == 0.:
                startTP = models[i]
                TPstart.append(int(float(startTP)))
                break
    # Find end of TP: go from TP peak forwards.
    TPend = []
    max_m_TP = []
    min_m_TP = []
    DUP_m = []
    TP_max_env = []
    DUPm_min_h = []
    # NOTE(review): flagdecline is never reset between pulses -- confirm
    # this is intended for pulses after the first.
    flagdecline = False
    for k in range(len(TPmods)):
        idx = list(models).index(TPmods[k])
        mx2b = mx2b_array[idx:]
        mx2t = mx2t_array[idx:]
        refsize = mx2t[0] - mx2b[0]  # PDCZ extent at the pulse peak
        for i in range(len(mx2b)):
            if i == 0:
                continue
            # PDCZ has shrunk to half its peak size: start of the decline.
            if ((mx2t[i] - mx2b[i]) < (0.5 * refsize)) and (flagdecline == False):
                flagdecline = True
                refmasscoord = mx2t[i]
                print('flagdecline to true')
                continue
            if flagdecline == True:
                # For the massive and HDUP AGBs where the PDCZ convective
                # zone becomes the H-dredge-up convective zone.
                if (mx2t[i] - mx2b[i]) < (0.1 * refsize):
                    if refmasscoord < mx2t[i]:
                        endTP = models[idx + i - 1]
                        TPend.append(int(float(endTP)))
                        print('HDUp, TP end', endTP)
                        break
                # Normal case: PDCZ has essentially vanished.
                if (mx2t[i] - mx2b[i]) < 1e-5:
                    endTP = models[idx + i - 1]
                    TPend.append(int(float(endTP)))
                    print('normal TPend', endTP)
                    break
        print('found TP boundaries', TPstart[-1], TPend[-1])
        # Find max and minimum mass coordinate of TP at max luminosity.
        mtot = self.get('star_mass')
        masstop_tot = np.array(masstop) * np.array(mtot)
        # NOTE(review): TPstart[k]/TPend[k] are model NUMBERS used here as
        # array indices -- only valid if model_number equals the row index.
        idx_tpext = list(masstop_tot).index(max(masstop_tot[TPstart[k]:(TPend[k] - 10)]))
        print('TP', k + 1, TPmods[k])
        print(TPstart[k], TPend[k])
        print('INDEX', idx_tpext, models[idx_tpext])
        print(max(masstop_tot[TPstart[k]:(TPend[k] - 10)]))
        mtot = self.get('star_mass')[idx_tpext]
        max_m_TP.append(masstop[idx_tpext] * mtot)
        min_m_TP.append(massbot[idx_tpext] * mtot)
        TP_max_env.append(massenv[idx_tpext])
        if k > (len(DUPmods) - 1):
            continue
        idx = list(models).index(DUPmods[k])
        mtot = self.get('star_mass')[idx]
        # Identify if it is really a TDUP.
        # NOTE(review): t0_idx is undefined here; the bare excepts below
        # mask the resulting NameError, so h1_bndry may end up unbound.
        try:
            h1_bndry = self.get("h1_boundary_mass")[t0_idx:]
        except:
            try:
                h1_bndry = self.get('he_core_mass')[t0_idx:]
            except:
                pass
        if h1_bndry[idx] >= max_m_TP[-1]:
            # H boundary did not reach into the pulse region: not a TDUP.
            print('Pulse', k + 1, 'model', TPmods[k], 'skip')
            print(h1_bndry[idx], max_m_TP[-1])
            DUPmods[k] = -1
            DUPm_min_h.append(-1)
            continue
        # NOTE(review): h1_bdy is undefined (its assignment is commented
        # out above) -- this line raises NameError when reached.
        DUPm_min_h.append(h1_bdy[idx])
    for k in range(len(TPmods)):
        print('#############')
        print('TP ', k + 1)
        print('Start: ', TPstart[k])
        print('Peak', TPmods[k], TP_max_env[k])
        print('(conv) PDCZ size: ', min_m_TP[k], ' till ', max_m_TP[k])
        print('End', TPend[k])
        if k <= (len(DUPmods) - 1):
            print(len(DUPmods), k)
            print('DUP max', DUPmods[k])
            print(DUPm_min_h[k])
        else:
            print('no DUP')
    return TPstart, TPmods, TP_max_env, TPend, min_m_TP, max_m_TP, DUPmods, DUPm_min_h
def handle(self, **options):
    """Create "Class of 20[16-19]" groups"""
    # Ensure the umbrella "Students" group exists and is student-visible.
    students_grp, _ = Group.objects.get_or_create(name="Students")
    students_props = students_grp.properties
    students_props.student_visible = True
    students_props.save()
    students_grp.save()
    # One "Class of <year>" group per graduation year, four years out.
    for grad_year in [settings.SENIOR_GRADUATION_YEAR + offset for offset in range(0, 4)]:
        members = User.objects.users_in_year(grad_year)
        class_grp, _ = Group.objects.get_or_create(name="Class of {}".format(grad_year))
        class_props = class_grp.properties
        class_props.student_visible = True
        class_props.save()
        class_grp.save()
        self.stdout.write("{}: {} users".format(grad_year, len(members)))
        # Enroll every user of that year in both groups.
        for member in members:
            member.groups.add(class_grp)
            member.groups.add(students_grp)
            member.save()
        self.stdout.write("{}: Processed".format(grad_year))
    self.stdout.write("Done.")
def verify_checksum(self, progress_callback=None, chunk_size=None, throws=True, checksum_kwargs=None, **kwargs):
    """Verify the checksum of this file instance.

    Recomputes the checksum via the storage backend and records the outcome
    in ``last_check``/``last_check_at``.

    :param bool throws: If `True`, exceptions raised during checksum
        calculation will be re-raised after logging. If set to `False`, and
        an exception occurs, the `last_check` field is set to `None`
        (`last_check_at` of course is updated), since no check actually was
        performed.
    :param dict checksum_kwargs: Passed as ``**kwargs`` to
        ``storage().checksum``.
    :return: the new value of ``last_check``.
    """
    try:
        computed = self.storage(**kwargs).checksum(progress_callback=progress_callback, chunk_size=chunk_size, **(checksum_kwargs or {}))
    except Exception as exc:
        current_app.logger.exception(str(exc))
        if throws:
            raise
        # Best-effort mode: record that no check was actually performed.
        computed = None
    with db.session.begin_nested():
        if computed is None:
            self.last_check = None
        else:
            self.last_check = (self.checksum == computed)
        self.last_check_at = datetime.utcnow()
    return self.last_check
def add_molecule(self, mol, bond=None, base=None, target=None):
    """Connect an atom group (for the SMILES parser).

    May require recalculation of 2D coordinates for drawing.

    Args:
        mol: graphmol.Compound(); the original object will be copied.
        bond: Bond object to be connected; the original will NOT be
            copied, so be careful.
        base: index of the atom in self to connect
        target: index of the atom in the incoming group to be connected
    Raises:
        TypeError
    """
    # Shift incoming atom indices so they follow the existing ones.
    offset = self.available_idx() - 1
    index_map = {idx: idx + offset for idx, _ in mol.atoms_iter()}
    renumbered = nx.relabel_nodes(mol.graph, index_map)  # copies by default
    self.graph.add_nodes_from(renumbered.nodes(data=True))
    self.graph.add_edges_from(renumbered.edges(data=True))
    if bond:
        self.add_bond(base, index_map[target], bond)
def get_job(self, job_id):
    """GetJob
    https://apidocs.joyent.com/manta/api.html#GetJob
    """
    log.debug("GetJob %r", job_id)
    endpoint = "/%s/jobs/%s/live/status" % (self.account, job_id)
    response, body = self._request(endpoint, "GET")
    if response.status != 200:
        raise errors.MantaAPIError(response, body)
    try:
        return json.loads(body)
    except ValueError:
        # Server returned something that is not valid JSON.
        raise errors.MantaError('invalid job data: %r' % body)
def get_branch_container_tag(self):
    """Return the container tag for the current branch.

    The tag is "<prefix>-<branch>" when a prefix is configured, otherwise
    just the branch name.
    """
    prefix = self.__prefix
    if prefix:
        return "{0}-{1}".format(prefix, self.__branch)
    return "{0}".format(self.__branch)
def type(self, type):
    """Sets the type of this AggregatedQuotaUsageReport.

    Type of quota usage entry.

    :param type: The type of this AggregatedQuotaUsageReport.
    :type: str
    :raises ValueError: if type is None or not one of the allowed values.
    """
    allowed_values = ["reservation", "reservation_release", "reservation_termination", "package_renewal", "package_creation", "package_termination"]
    if type is None:
        raise ValueError("Invalid value for `type`, must not be `None`")
    if type not in allowed_values:
        raise ValueError("Invalid value for `type` ({0}), must be one of {1}".format(type, allowed_values))
    self._type = type
def tokenize(string):
    """Split the input several times, yielding intermediate results at each
    level: underscore-delimited parts, letter/number boundaries, and word
    segments.

    E.g., tokenize('landuse_austin_tx_24dates') ->
    ['landuse', 'land', 'use', 'austin', 'tx', '24dates', '24', 'dates']
    (No token is needed for the original string because to_tsvector splits
    on underscores.)
    """
    if not wordsegment.BIGRAMS:
        # Lazy corpus load; should only happen in dev.
        wordsegment.load()
    for underscore_part in string.split('_'):
        alnum_parts = ALPHA_NUM_RE.findall(underscore_part)
        if len(alnum_parts) > 1:
            # The part actually split at a letter/number boundary: keep it too.
            yield underscore_part
        for alnum_part in alnum_parts:
            segments = wordsegment.segment(alnum_part)
            if len(segments) > 1:
                yield alnum_part
            yield from segments
def percentage(self, percentage):
    """Sets the percentage of this OrderLineItemTax.

    The percentage of the tax, as a string representation of a decimal
    number. A value of `7.25` corresponds to a percentage of 7.25%.

    :param percentage: The percentage of this OrderLineItemTax.
    :type: str
    :raises ValueError: if percentage is None or longer than 10 characters.
    """
    if percentage is None:
        raise ValueError("Invalid value for `percentage`, must not be `None`")
    if len(percentage) > 10:
        # Message fixed: the check permits exactly 10 characters, so the
        # limit is "less than or equal to", not "less than".
        raise ValueError("Invalid value for `percentage`, length must be less than or equal to `10`")
    self._percentage = percentage
def AppendPathEntries(cls, path, path_separator, number_of_wildcards, skip_first):
    """Appends glob wildcards to a path.

    This function will append glob wildcards "*" to a path, returning paths
    with an additional glob wildcard up to the specified number. E.g. given
    the path "/tmp" and a number of 2 wildcards, this function will return
    "tmp/*", "tmp/*/*". When skip_first is true the path with the first
    wildcard is not returned as a result.

    Args:
        path (str): path to append glob wildcards to.
        path_separator (str): path segment separator.
        number_of_wildcards (int): number of glob wildcards to append.
        skip_first (bool): True if the first path with glob wildcard should
            be skipped as a result.

    Returns:
        list[str]: paths with glob wildcards.
    """
    # Drop a trailing separator so we never emit a double separator.
    if path[-1] == path_separator:
        path = path[:-1]
    wildcards_left = number_of_wildcards
    if skip_first:
        path = path + path_separator + '*'
        wildcards_left -= 1
    globbed_paths = []
    for _ in range(wildcards_left):
        path = path + path_separator + '*'
        globbed_paths.append(path)
    return globbed_paths
def _set_zero ( self , i , j , a , b , r , s , t ) :
"""Let A [ i , j ] be zero based on Bezout ' s identity
[ ii ij ]
[ ji jj ] is a ( k , k ) minor of original 3x3 matrix .""" | L = np . eye ( 3 , dtype = 'intc' )
L [ i , i ] = s
L [ i , j ] = t
L [ j , i ] = - b // r
L [ j , j ] = a // r
self . _L . append ( L . copy ( ) )
self . _A = np . dot ( L , self . _A ) |
def clusterdown_wrapper(func):
    """Wrapper for CLUSTERDOWN error handling.

    If the cluster reports it is down it is assumed that:
    - connection_pool was disconnected
    - connection_pool was reset
    - refresh_table_asap set to True

    The command is retried up to 3 times; ClusterDownError is raised if it
    continues to fail.
    """
    @wraps(func)
    async def wrapped(*args, **kwargs):
        for _ in range(3):
            try:
                return await func(*args, **kwargs)
            except ClusterDownError:
                # Retry with the new cluster setup. All other errors
                # propagate to the caller.
                continue
        # Still failing after 3 attempts: surface the error to the caller.
        raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
    return wrapped
def deserialize(serialized_script):
    '''bytearray -> str

    Decode a serialized Bitcoin-style script into a space-separated string
    of opcode names and hex-encoded push data.
    '''
    deserialized = []
    i = 0
    while i < len(serialized_script):
        current_byte = serialized_script[i]
        if current_byte == 0xab:
            # OP_CODESEPARATOR (0xab) is deliberately unsupported.
            raise NotImplementedError('OP_CODESEPARATOR is a bad idea.')
        if current_byte <= 75 and current_byte != 0:
            # Direct push: the opcode value is the number of bytes to push.
            deserialized.append(serialized_script[i + 1: i + 1 + current_byte].hex())
            i += 1 + current_byte
            # NOTE(review): this bounds check runs after the (silently
            # truncated) slice is already appended -- confirm intended.
            if i > len(serialized_script):
                raise IndexError('Push {} caused out of bounds exception.'.format(current_byte))
        elif current_byte == 76:
            # OP_PUSHDATA1: next byte holds the blob length.
            blob_len = serialized_script[i + 1]
            deserialized.append(serialized_script[i + 2: i + 2 + blob_len].hex())
            i += 2 + blob_len
        elif current_byte == 77:
            # OP_PUSHDATA2: next two bytes (little-endian) hold the length.
            blob_len = utils.le2i(serialized_script[i + 1: i + 3])
            deserialized.append(serialized_script[i + 3: i + 3 + blob_len].hex())
            i += 3 + blob_len
        elif current_byte == 78:
            # OP_PUSHDATA4 is deliberately unsupported.
            raise NotImplementedError('OP_PUSHDATA4 is a bad idea.')
        else:
            # Plain opcode: prefer the network-specific overwrite table,
            # then fall back to the generic opcode table.
            if current_byte in riemann.network.INT_TO_CODE_OVERWRITE:
                deserialized.append(riemann.network.INT_TO_CODE_OVERWRITE[current_byte])
            elif current_byte in INT_TO_CODE:
                deserialized.append(INT_TO_CODE[current_byte])
            else:
                raise ValueError('Unsupported opcode. ' 'Got 0x%x' % serialized_script[i])
            i += 1
    return ' '.join(deserialized)
def change_credentials(self, user=None, auth_token=None):
    """Change login credentials.

    :param str user: Username used to connect to server.
    :param str auth_token: Authentication token used to connect to server.
    """
    # Store the new credentials on the request session, then re-login so
    # the session reflects them immediately.
    self.r_session.set_credentials(user, auth_token)
    self.r_session.login()
def create_tag(self, tag, message, sha, obj_type, tagger, lightweight=False):
    """Create a tag in this repository.

    :param str tag: (required), name of the tag
    :param str message: (required), tag message
    :param str sha: (required), SHA of the git object this is tagging
    :param str obj_type: (required), type of object being tagged, e.g.,
        'commit', 'tree', 'blob'
    :param dict tagger: (required), containing the name, email of the
        tagger and the date it was tagged
    :param bool lightweight: (optional), if False, create an annotated
        tag, otherwise create a lightweight tag (a Reference).
    :returns: If lightweight == False: :class:`Tag <github3.git.Tag>` if
        successful, else None. If lightweight == True: :class:`Reference
        <github3.git.Reference>`
    """
    if lightweight and tag and sha:
        # Lightweight tag: just a ref pointing at the given object.
        return self.create_ref('refs/tags/' + tag, sha)
    json = None
    if tag and message and sha and obj_type and len(tagger) == 3:
        data = {'tag': tag, 'message': message, 'object': sha, 'type': obj_type, 'tagger': tagger}
        url = self._build_url('git', 'tags', base_url=self._api)
        json = self._json(self._post(url, data=data), 201)
    if json:
        # Bug fix: the ref for an annotated tag must point at the SHA of
        # the newly created tag object (returned by the API), not at the
        # SHA of the tagged object, otherwise the annotation is bypassed.
        self.create_ref('refs/tags/' + tag, json.get('sha'))
    return Tag(json) if json else None
def get_medium_attachments_of_controller(self, name):
    """Return the medium attachments attached to the storage controller
    with the given name.

    in name of type str
    return medium_attachments of type :class:`IMediumAttachment`
    raises :class:`VBoxErrorObjectNotFound`
        A storage controller with given name doesn't exist.
    """
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    raw_attachments = self._call("getMediumAttachmentsOfController", in_p=[name])
    # Wrap each raw handle in its interface class.
    return [IMediumAttachment(raw) for raw in raw_attachments]
def level2_initialize(self):
    """Populate path and stat attributes from ``self.abspath``.

    Fairly complete but slightly slower initialization: sets dirname,
    basename, fname, ext (lower-cased), size_on_disk, atime, ctime and
    mtime.
    """
    # Split the absolute path into directory and file name.
    self.dirname, self.basename = os.path.split(self.abspath)
    # Split the file name into stem and extension.
    stem_and_ext = os.path.splitext(self.basename)
    self.fname = stem_and_ext[0]
    self.ext = stem_and_ext[1].lower()
    self.size_on_disk = os.path.getsize(self.abspath)
    self.atime = os.path.getatime(self.abspath)  # last access time
    self.ctime = os.path.getctime(self.abspath)  # creation time (platform dependent)
    self.mtime = os.path.getmtime(self.abspath)  # last modification time
def BGPSessionState_originator_switch_info_switchIdentifier(self, **kwargs):
    """Auto Generated Code"""
    # Build config/BGPSessionState/originator-switch-info/switchIdentifier.
    config = ET.Element("config")
    session_state = ET.SubElement(config, "BGPSessionState", xmlns="http://brocade.com/ns/brocade-notification-stream")
    switch_info = ET.SubElement(session_state, "originator-switch-info")
    identifier = ET.SubElement(switch_info, "switchIdentifier")
    identifier.text = kwargs.pop('switchIdentifier')
    # Dispatch through the supplied callback, or the instance default.
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def reshuffle(expr, by=None, sort=None, ascending=True):
    """Reshuffle data.

    :param expr: input collection
    :param by: the sequence or scalar to shuffle by; RandomScalar as default
    :param sort: the sequence or scalar to sort
    :param ascending: True if ascending else False
    :return: collection
    """
    shuffle_key = by or RandomScalar()
    regrouped = expr.groupby(shuffle_key)
    if sort:
        regrouped = regrouped.sort_values(sort, ascending=ascending)
    return ReshuffledCollectionExpr(_input=regrouped, _schema=expr._schema)
def encode(self, method, uri):
    '''Called by the client to encode the HTTP Digest Authorization header.

    :param method: HTTP method of the request (e.g. "GET").
    :param uri: request URI; its path (plus query) is used as digest-uri.
    :return: the full "Digest ..." header value, or None when credentials
        are missing or the qop is unsupported.
    '''
    if not self.username or not self.password:
        return
    o = self.options
    qop = o.get('qop')
    realm = o.get('realm')
    nonce = o.get('nonce')
    entdig = None  # entity digest; only meaningful for auth-int (unhandled)
    p_parsed = urlparse(uri)
    path = p_parsed.path
    if p_parsed.query:
        path += '?' + p_parsed.query
    ha1 = self.ha1(realm, self.password)
    ha2 = self.ha2(qop, method, path)
    if qop == 'auth':
        # Reusing the same server nonce increments the nonce count (nc).
        # NOTE(review): last_nonce is read but never updated here --
        # presumably updated elsewhere; confirm.
        if nonce == self.last_nonce:
            self.nonce_count += 1
        else:
            self.nonce_count = 1
        ncvalue = '%08x' % self.nonce_count
        # Client nonce: hash of count, server nonce, time and random bytes.
        s = str(self.nonce_count).encode('utf-8')
        s += nonce.encode('utf-8')
        s += time.ctime().encode('utf-8')
        s += os.urandom(8)
        cnonce = sha1(s).hexdigest()[:16]
        noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, ha2)
        respdig = self.KD(ha1, noncebit)
    elif qop is None:
        # Legacy (pre-qop) digest: response = KD(ha1, nonce:ha2).
        respdig = self.KD(ha1, "%s:%s" % (nonce, ha2))
    else:
        # XXX handle auth-int.
        return
    base = ('username="%s", realm="%s", nonce="%s", uri="%s", ' 'response="%s"' % (self.username, realm, nonce, path, respdig))
    opaque = o.get('opaque')
    if opaque:
        base += ', opaque="%s"' % opaque
    if entdig:
        base += ', digest="%s"' % entdig
    base += ', algorithm="%s"' % self.algorithm
    if qop:
        base += ', qop=%s, nc=%s, cnonce="%s"' % (qop, ncvalue, cnonce)
    return 'Digest %s' % base
def export_warnings(self, export_file):
    """Append an export-warnings entry to the journal.

    This instructs Revit to export warnings from the opened model.
    Currently Revit will stop journal execution if the model does not have
    any warnings and the export warnings UI button is disabled.

    Args:
        export_file (str): full path of the output html file
    """
    target_dir = op.dirname(export_file)
    target_name = op.splitext(op.basename(export_file))[0]
    entry = templates.EXPORT_WARNINGS.format(warnings_export_path=target_dir, warnings_export_file=target_name)
    self._add_entry(entry)
def pluck(self, column):
    """Pluck a single column from the database.

    :param column: The column to pluck
    :type column: str
    :return: The column value, or None when no row was found
    :rtype: mixed
    """
    row = self.first([column])
    if not row:
        return None
    return row[column]
def parse(self, buffer):
    """Parse an existing datagram into its corresponding TftpPacket object.

    :param buffer: the raw bytes off of the network.
    :return: the decoded packet object.
    """
    log.debug("parsing a %d byte packet" % len(buffer))
    # The opcode is the first two bytes, big-endian unsigned short.
    (opcode,) = struct.unpack(str("!H"), buffer[:2])
    log.debug("opcode is %d" % opcode)
    datagram = self.__create(opcode)
    datagram.buffer = buffer
    return datagram.decode()
def get_cited_dois(arxiv_id):
    """Get the DOIs of the papers cited in the .bbl file(s) of a preprint.

    .. note::
        Bulk download of sources from arXiv is not permitted by their API.
        You should have a look at http://arxiv.org/help/bulk_data_s3.

    :param arxiv_id: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``)
        in a canonical form.
    :returns: A dict of cleaned plaintext citations and their associated DOI.
    """
    cited = {}
    # Fetch the cited DOIs for each bbl file of this preprint.
    for bbl_content in arxiv.get_bbl(arxiv_id):
        cited.update(bbl.get_cited_dois(bbl_content))
    return cited
def get_request(self, uuid, raw=False, multiple=False, connection_adapter=None):
    """Get a RPC request.

    :param str uuid: Rpc Identifier
    :param bool raw: If enabled return the frame as is, else return
        result as a dictionary.
    :param bool multiple: Are we expecting multiple frames.
    :param obj connection_adapter: Provide custom connection adapter.
    :return: the response frame (raw or as dict), or None
    """
    if uuid not in self._response:
        return None
    adapter = connection_adapter or self._default_connection_adapter
    self._wait_for_request(uuid, adapter)
    frame = self._get_response_frame(uuid)
    if not multiple:
        # Single-frame request: drop the bookkeeping entry.
        self.remove(uuid)
    if raw:
        return frame
    if frame is not None:
        return dict(frame)
    return None
def build_report(self):
    """Calculate the pair of metrics for each threshold for each result.

    For every threshold, ``metric1`` and ``metric2`` are scored against
    each result and summarised by mean, median and the configured
    lower/upper quantiles.  The table is cached and only recomputed when
    new result curves have arrived since the last call.

    :return: DataFrame indexed by threshold, one column per
        metric/statistic pair.
    """
    thresholds = self.thresholds
    lower_quantile = self.config['lower_quantile']
    upper_quantile = self.config['upper_quantile']
    if self.n_current_results > self.n_cached_curves:
        # If there are new curves, recompute.
        colnames = ['_'.join([metric, stat]) for metric in [self.metric1.name, self.metric2.name] for stat in ['Mean', 'Median', '%d_Percentile' % (100 * lower_quantile), '%d_Percentile' % (upper_quantile * 100)]]
        self.ret = pd.DataFrame(columns=colnames, index=thresholds, dtype='float64')
        for threshold in thresholds:
            m1s = Series([self.metric1.score(result, threshold) for result in self.results])
            m2s = Series([self.metric2.score(result, threshold) for result in self.results])
            # Bug fix: the percentiles were hard-coded to .05/.95 and
            # ignored the configured quantiles used for the column names.
            self.ret.loc[threshold] = (m1s.mean(), m1s.quantile(.5), m1s.quantile(lower_quantile), m1s.quantile(upper_quantile), m2s.mean(), m2s.quantile(.5), m2s.quantile(lower_quantile), m2s.quantile(upper_quantile))
    self.build_curves()
    self.summary_df = self.ret
    return self.ret
def __add_endpoints(self):
    """Initialize the Sanic JWT blueprint and attach it to the instance."""
    for mapping in endpoint_mappings:
        # Register only endpoints whose config keys are all enabled.
        if all(self.config.get(key) for key in mapping.keys):
            self.__add_single_endpoint(mapping.cls, mapping.endpoint, mapping.is_protected)
    # Route SanicJWT exceptions through the configured exception response.
    self.bp.exception(exceptions.SanicJWTException)(self.responses.exception_response)
    if not self.instance_is_blueprint:
        self.instance.blueprint(self.bp, url_prefix=self._get_url_prefix())
def var(self, bias=False, *args, **kwargs):
    """Exponential weighted moving variance.

    :param bias: if False (default), compute the bias-corrected statistic.
    """
    nv.validate_window_func('var', args, kwargs)
    def f(arg):
        # EWM variance is the EWM covariance of the series with itself.
        return libwindow.ewmcov(arg, arg, self.com, int(self.adjust), int(self.ignore_na), int(self.min_periods), int(bias))
    return self._apply(f, **kwargs)
def _regenerate_secret_key ( self ) :
"""Regenerate secret key
http : / / www . mediafire . com / developers / core _ api / 1.3 / getting _ started / # call _ signature""" | # Don ' t regenerate the key if we have none
if self . _session and 'secret_key' in self . _session :
self . _session [ 'secret_key' ] = ( int ( self . _session [ 'secret_key' ] ) * 16807 ) % 2147483647 |
def root_block(template_name=DEFAULT_TEMPLATE_NAME):
    """Decorator marking the decorated block function as the root of the
    block template hierarchy.  In the usual case this will be the HTML
    skeleton of the document, unless the template is used to serve partial
    HTML rendering for Ajax.

    :param template_name: The name of the block template hierarchy which is
        passed to the :func:`render_template` document rendering function.
        Different templates are useful for rendering documents with
        differing layouts (e.g. admin back-end vs. site front-end), or for
        partial HTML rendering for Ajax.
    """
    def register(block_func):
        # Instantiating RootBlock registers the function as the hierarchy
        # root as a side effect; the function itself is returned unchanged.
        RootBlock(block_func, template_name)
        return block_func
    return register
def ConvBPDNMask(*args, **kwargs):
    """A wrapper function that dynamically defines a class derived from
    one of the implementations of the masked Convolutional BPDN problems,
    and returns an object instantiated with the provided parameters. The
    wrapper is designed to allow the appropriate object to be created by
    calling this function using the same syntax as would be used if it
    were a class. The specific implementation is selected by use of an
    additional keyword argument 'method'. Valid values are:

    - ``'admm'``:
      Use the implementation defined in :class:`.admm.cbpdn.ConvBPDNMaskDcpl`.
    - ``'fista'``:
      Use the implementation defined in :class:`.fista.cbpdn.ConvBPDNMask`.

    The default value is ``'admm'``.
    """
    # Extract method selection argument or set default.
    method = kwargs.pop('method', 'admm')
    # Assign base class depending on method selection argument.
    base = cbpdnmsk_class_label_lookup(method)

    # Nested class with dynamically determined inheritance.
    class ConvBPDNMask(base):
        def __init__(self, *args, **kwargs):
            super(ConvBPDNMask, self).__init__(*args, **kwargs)

    # Allow pickling of objects of type ConvBPDNMask.
    _fix_dynamic_class_lookup(ConvBPDNMask, method)
    # Return object of the nested class type.
    return ConvBPDNMask(*args, **kwargs)
def random_date_between(start_date, end_date):
    """Return a random date between start_date and end_date (inclusive).

    :param start_date: inclusive lower bound (datetime.date).
    :param end_date: inclusive upper bound; must not precede start_date.
    :return: start_date plus a uniformly random whole-second offset.
    :raises TypeError: if start_date is not a datetime.date.
    """
    # Raise a real error instead of ``assert`` -- asserts vanish under -O.
    if not isinstance(start_date, datetime.date):
        raise TypeError('start_date must be a datetime.date')
    delta_secs = int((end_date - start_date).total_seconds())
    offset = datetime.timedelta(seconds=random.randint(0, delta_secs))
    return (start_date + offset)
def next_chunk(self):
    """Returns the chunk immediately following (and adjacent to) this one,
    if it exists.

    :returns: The following chunk, or None if there is no chunk between
        this one and the end of the heap.
    """
    def sym_base_handler(base):
        # A symbolic successor base is concretized to its maximum value.
        l.warning("A computed chunk base is symbolic; maximizing it")
        return self.state.solver.max_int(base)
    # The next chunk starts right after this chunk (base + size).
    base = concretize(self.base + self.get_size(), self.state.solver, sym_base_handler)
    # NOTE(review): the bound presumably reserves two size_t fields at the
    # end of the heap for the top chunk's header -- confirm against the
    # allocator layout.
    if base >= self.heap.heap_base + self.heap.heap_size - 2 * self._chunk_size_t_size:
        return None
    else:
        return PTChunk(base, self.state)
def parse_enum_type_definition(lexer: Lexer) -> EnumTypeDefinitionNode:
    """EnumTypeDefinition:
    Description? enum Name Directives? EnumValuesDefinition?
    """
    # Remember the first token so the node's location spans the definition.
    start = lexer.token
    description = parse_description(lexer)
    expect_keyword(lexer, "enum")
    name = parse_name(lexer)
    directives = parse_directives(lexer, True)
    values = parse_enum_values_definition(lexer)
    return EnumTypeDefinitionNode(description=description, name=name, directives=directives, values=values, loc=loc(lexer, start),)
def file_create(filename, settings):
    """Creates a file.

    Args:
        filename (str): Filename.
        settings (dict): Must be {"content": actual_content}

    Raises:
        ValueError: if settings is not a single-item dict keyed "content".
    """
    # Validate the key too: the original only checked the length and
    # silently wrote nothing when the single key was not "content".
    if len(settings) != 1 or "content" not in settings:
        raise ValueError("Settings must only contain one item with key " "'content'.")
    with open(filename, 'w') as f:
        f.write(settings["content"])
def bind(self, flask_app, service, group=None):
    """Bind the service API urls to a flask app."""
    service_groups = self.services[service]
    if group not in service_groups:
        raise RuntimeError('API group {} does not exist in service {}'.format(group, service))
    for api_name, api in service_groups[group].items():
        # Only bind APIs that have views associated with them.
        if api.view_fn is None:
            continue
        if api_name not in flask_app.view_functions:
            flask_app.add_url_rule(api.url, api_name, view_func=api.view_fn, **api.options)
def _emit(self, event, document, *args, **kwargs):
    """Inner version of emit which passes the given document as the
    primary argument to handler functions.

    :param event: name of the event being emitted.
    :param document: document passed first to every registered handler.
    """
    self.handler_registrar().apply(event, document, *args, **kwargs)
async def create_wallet(config: str, credentials: str) -> None:
    """Creates a new secure wallet with the given unique name.

    :param config: Wallet configuration json.
        "id": string, Identifier of the wallet.
            Configured storage uses this identifier to lookup exact wallet
            data placement.
        "storage_type": optional<string>, Type of the wallet storage.
            Defaults to 'default'. 'Default' storage type allows to store
            wallet data in the local file. Custom storage types can be
            registered with indy_register_wallet_storage call.
        "storage_config": optional<object>, Storage configuration json.
            Storage type defines set of supported keys. Can be optional if
            storage supports default configuration. For 'default' storage
            type configuration is:
            "path": optional<string>, Path to the directory with wallet
                files. Defaults to $HOME/.indy_client/wallet. Wallet will
                be stored in the file {path}/{id}/sqlite.db
    :param credentials: Wallet credentials json
        "key": string, Key or passphrase used for wallet key derivation.
            Look to key_derivation_method param for information about
            supported key derivation methods.
        "storage_credentials": optional<object>, Credentials for wallet
            storage. Storage type defines set of supported keys. Can be
            optional if storage supports default configuration. For
            'default' storage type should be empty.
        "key_derivation_method": optional<string>, Algorithm to use for
            wallet key derivation:
            ARGON2I_MOD - derive secured wallet master key (used by default)
            ARGON2I_INT - derive secured wallet master key (less secured but faster)
            RAW - raw wallet key master provided (skip derivation).
                RAW keys can be generated with generate_wallet_key call
    :return: Error code
    """
    logger = logging.getLogger(__name__)
    logger.debug("create_wallet: >>> config: %r, credentials: %r", config, credentials)
    # The C callback is created once and cached on the function object so
    # the same ctypes callback is reused across calls.
    if not hasattr(create_wallet, "cb"):
        logger.debug("create_wallet: Creating callback")
        create_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
    # Encode the json strings for the C FFI boundary.
    c_config = c_char_p(config.encode('utf-8'))
    c_credentials = c_char_p(credentials.encode('utf-8'))
    await do_call('indy_create_wallet', c_config, c_credentials, create_wallet.cb)
    logger.debug("create_wallet: <<<")
def _format_dataframe ( df : pd . DataFrame , nautical_units = True ) -> pd . DataFrame :
"""This function converts types , strips spaces after callsigns and sorts
the DataFrame by timestamp .
For some reason , all data arriving from OpenSky are converted to
units in metric system . Optionally , you may convert the units back to
nautical miles , feet and feet / min .""" | if "callsign" in df . columns and df . callsign . dtype == object :
df . callsign = df . callsign . str . strip ( )
if nautical_units :
df . altitude = df . altitude / 0.3048
if "geoaltitude" in df . columns :
df . geoaltitude = df . geoaltitude / 0.3048
if "groundspeed" in df . columns :
df . groundspeed = df . groundspeed / 1852 * 3600
if "vertical_rate" in df . columns :
df . vertical_rate = df . vertical_rate / 0.3048 * 60
df . timestamp = pd . to_datetime ( df . timestamp * 1e9 ) . dt . tz_localize ( "utc" )
if "last_position" in df . columns :
df = df . query ( "last_position == last_position" ) . assign ( last_position = pd . to_datetime ( df . last_position * 1e9 ) . dt . tz_localize ( "utc" ) )
return df . sort_values ( "timestamp" ) |
def _getInterfaces(self):
    """Discover and load application communication interfaces.

    Scans ``application/interface/<name>/<name>.py``, imports each such
    module from its file path, and instantiates its ``Service`` class
    (passing ``self``) when the module exposes one.

    :return: <dict> mapping interface name -> Service instance
    """
    loaded = {}
    baseDir = os.path.join("application", "interface")
    for entry in os.listdir(baseDir):
        entryPath = os.path.join(baseDir, entry)
        # skip plain files and private/hidden directories
        if not os.path.isdir(entryPath) or entry.startswith("__") or entry.startswith("."):
            continue
        name = ntpath.basename(entryPath)
        modulePath = os.path.join(entryPath, name) + ".py"
        if not os.path.isfile(modulePath):
            continue
        # import the interface module directly from its file path
        spec = importlib.util.spec_from_file_location(name, modulePath)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        # only modules exposing a Service class are kept
        if hasattr(module, "Service"):
            loaded[name] = module.Service(self)
    return loaded
def _send ( self , msg , buffers = None ) :
"""Sends a message to the model in the front - end .""" | if self . comm is not None and self . comm . kernel is not None :
self . comm . send ( data = msg , buffers = buffers ) |
def clean_api_docs_dirs():
    """Wipe any previously generated api-docs output and recreate it empty.

    :returns: Path to api-docs directory.
    :rtype: str
    """
    here = os.path.dirname(__file__)
    api_docs_path = os.path.abspath(os.path.join(here, '..', 'docs', 'api-docs'))
    if os.path.exists(api_docs_path):
        # remove stale output from a previous build
        rmtree(api_docs_path)
    create_dirs(api_docs_path)
    return api_docs_path
def format_value(self, value, isToAlign=False, format_target="html"):
    """Format a value nicely for human-readable output (including rounding).

    @param value: the value to format
    @param isToAlign: if True, spaces will be added to the returned String
        representation to align it to all other values in this column
    @param format_target: the target the value should be formatted for
    @return: a formatted String representation of the given value.
    """
    # Only format counts and measures
    if self.type.type != ColumnType.count and self.type.type != ColumnType.measure:
        return value

    if format_target not in POSSIBLE_FORMAT_TARGETS:
        raise ValueError('Unknown format target')

    if value is None or value == '':
        return ''

    # If the number ends with "s" or another unit, remove it.
    # Units should not occur in table cells, but in the table head.
    number_str = util.remove_unit(str(value).strip())
    number = float(number_str)
    if isnan(number):
        return 'NaN'
    elif number == inf:
        return 'Inf'
    elif number == -inf:
        return '-Inf'

    # Apply the scale factor to the value
    if self.scale_factor is not None:
        number *= self.scale_factor

    number_of_significant_digits = self.number_of_significant_digits
    max_dec_digits = 0
    # BUGFIX: string comparisons used "is"/"is not", which only worked by
    # virtue of CPython string interning (and warns on Python 3.8+);
    # replaced with ==/!=.
    if number_of_significant_digits is None and format_target == "tooltip_stochastic":
        return str(round(number, DEFAULT_TOOLTIP_PRECISION))
    elif self.type.type == ColumnType.measure:
        if number_of_significant_digits is None and format_target != "csv":
            number_of_significant_digits = DEFAULT_TIME_PRECISION
        max_dec_digits = self.type.max_decimal_digits

    if number_of_significant_digits is not None:
        current_significant_digits = _get_significant_digits(number_str)
        return _format_number(number, current_significant_digits,
                              number_of_significant_digits, max_dec_digits,
                              isToAlign, format_target)
    else:
        if number == float(number_str) or isnan(number) or isinf(number):
            # TODO remove as soon as scaled values are handled correctly
            return number_str
        if int(number) == number:
            number = int(number)
        return str(number)
def getNeighborProc(self, proc, offset, periodic=None):
    """Return the rank of the neighbour of a processor.

    @param proc the reference processor rank
    @param offset displacement, e.g. (1, 0) for north, (0, -1) for west, ...
    @param periodic boolean list of True/False values, True for each axis
           that wraps around
    @note returns None when no decomposition is set or when the neighbour
          falls outside the (non-periodic) domain
    """
    if self.mit is None:
        # no decomposition, nothing to compute
        return None

    base = self.mit.getIndicesFromBigIndex(proc)
    target = [base[axis] + offset[axis] for axis in range(self.ndims)]

    if periodic is not None and self.decomp is not None:
        # wrap around on the periodic axes
        for axis in range(self.ndims):
            if periodic[axis]:
                target[axis] %= self.decomp[axis]

    if not self.mit.areIndicesValid(target):
        return None
    return self.mit.getBigIndexFromIndices(target)
def exe_to_egg(self, dist_filename, egg_tmp):
    """Extract a bdist_wininst to the directories an egg would use.

    :param dist_filename: path to the bdist_wininst installer archive
    :param egg_tmp: destination directory receiving the egg layout

    Walks the archive remapping installer prefixes onto egg-relative
    paths, byte-compiles extracted .py files, generates loader stubs for
    .pyd extensions, and writes the EGG-INFO metadata files.
    """
    # Check for .pth file and set up prefix translations
    prefixes = get_exe_prefixes(dist_filename)
    to_compile = []   # extracted .py files to byte-compile afterwards
    native_libs = []  # .pyd/.dll resources found in the archive
    top_level = {}    # top-level module/package names (dict used as a set)

    def process(src, dst):
        # Map one archive member onto its destination under egg_tmp;
        # returning None skips extraction of that member.
        s = src.lower()
        for old, new in prefixes:
            if s.startswith(old):
                src = new + src[len(old):]
                parts = src.split('/')
                dst = os.path.join(egg_tmp, *parts)
                dl = dst.lower()
                if dl.endswith('.pyd') or dl.endswith('.dll'):
                    # native extension: strip platform decoration from the
                    # filename and record it for stub generation below
                    parts[-1] = bdist_egg.strip_module(parts[-1])
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    native_libs.append(src)
                elif dl.endswith('.py') and old != 'SCRIPTS/':
                    top_level[os.path.splitext(parts[0])[0]] = 1
                    to_compile.append(dst)
                return dst
        if not src.endswith('.pth'):
            # unrecognized member outside any known prefix
            log.warn("WARNING: can't process %s", src)
        return None

    # extract, tracking .pyd/.dll -> native_libs and .py -> to_compile
    unpack_archive(dist_filename, egg_tmp, process)

    stubs = []
    for res in native_libs:
        if res.lower().endswith('.pyd'):  # create stubs for .pyd's
            parts = res.split('/')
            resource = parts[-1]
            parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
            pyfile = os.path.join(egg_tmp, *parts)
            to_compile.append(pyfile)
            stubs.append(pyfile)
            bdist_egg.write_stub(resource, pyfile)

    self.byte_compile(to_compile)  # compile .py's
    bdist_egg.write_safety_flag(
        os.path.join(egg_tmp, 'EGG-INFO'),
        bdist_egg.analyze_egg(egg_tmp, stubs))  # write zip-safety flag

    # write EGG-INFO/top_level.txt and EGG-INFO/native_libs.txt from the
    # local lists gathered above (looked up by name via locals())
    for name in 'top_level', 'native_libs':
        if locals()[name]:
            txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
            if not os.path.exists(txt):
                f = open(txt, 'w')
                f.write('\n'.join(locals()[name]) + '\n')
                f.close()
def from_str(string):
    """Generate an `UpdateEvent` object from a string.

    The string must look like ``UPDATE <date>``; the date part is parsed
    (timezone information ignored).

    :raises EventParseError: if the string does not match the expected form.

    Note: the docstring previously claimed a `SetFinishedEvent` was
    produced; the code has always returned an `UpdateEvent`.
    """
    match = re.match(r'^UPDATE (.+)$', string)
    if not match:
        raise EventParseError
    parsed_date = dateutil.parser.parse(match.group(1), ignoretz=True)
    return UpdateEvent(parsed_date)
def checkstyle(self, sources):
    """Run the checker over every source file and total the failures.

    Files can be suppressed with a --suppress option which takes an xml file
    containing file paths that have exceptions and the plugins they need to
    ignore.

    :param sources: iterable containing source file names.
    :return: (int) number of failures
    """
    return sum(self._check_file(source) for source in sources)
def _transform_value ( value , policy , transform_type ) :
'''helper function to transform the policy value into something that more
closely matches how the policy is displayed in the gpedit GUI''' | t_kwargs = { }
if 'Transform' in policy :
if transform_type in policy [ 'Transform' ] :
_policydata = _policy_info ( )
if transform_type + 'Args' in policy [ 'Transform' ] :
t_kwargs = policy [ 'Transform' ] [ transform_type + 'Args' ]
return getattr ( _policydata , policy [ 'Transform' ] [ transform_type ] ) ( value , ** t_kwargs )
else :
return value
else :
if 'Registry' in policy :
if value == '(value not set)' :
return 'Not Defined'
return value |
def lexicase(self, F, num_selections=None, survival=False):
    """Conduct lexicase selection for de-aggregated fitness vectors.

    F is an (individuals x cases) fitness matrix where lower is better.
    Returns the row indices of the selected individuals; with
    survival=True each winner is removed from the pool after selection.
    """
    if num_selections is None:
        num_selections = F.shape[0]

    pool = np.arange(F.shape[0])  # candidate row indices
    selected = []

    for _ in np.arange(num_selections):
        survivors = pool
        case_order = list(np.arange(F.shape[1]))
        self.random_state.shuffle(case_order)

        while len(case_order) > 0 and len(survivors) > 1:
            case = case_order[0]
            # keep only candidates that are elite on this case
            elite = np.min(F[survivors, case])
            survivors = [ix for ix in survivors if F[ix, case] <= elite]
            case_order.pop(0)

        # random tie-break among remaining elites
        pick = self.random_state.randint(len(survivors))
        selected.append(survivors[pick])
        if survival:
            # filter the winner out of the remaining selection pool
            pool = [ix for ix in pool if ix != survivors[pick]]

    # pad if the pool was exhausted before num_selections winners
    while len(selected) < num_selections:
        selected.append(pool[0])
    return selected
def get_sample_stats(fit, log_likelihood=None):
    """Extract sample stats from PyStan fit.

    :param fit: PyStan fit object (read through its ``fit.sim`` layout).
    :param log_likelihood: optional name of a log-likelihood variable to
        extract alongside the sampler statistics.
    :return: OrderedDict mapping stat name -> stacked per-chain ndarray.
    """
    dtypes = {"divergent__": bool, "n_leapfrog__": np.int64, "treedepth__": np.int64}
    # post-warmup draws per chain
    ndraws = [s - w for s, w in zip(fit.sim["n_save"], fit.sim["warmup2"])]

    extraction = OrderedDict()
    # FIX(naming): the per-chain loop variable used to shadow the `ndraws`
    # list above; renamed to `chain_ndraws` — behavior is unchanged.
    for chain, (pyholder, chain_ndraws) in enumerate(zip(fit.sim["samples"], ndraws)):
        if chain == 0:
            for key in pyholder["sampler_param_names"]:
                extraction[key] = []
        for key, values in zip(pyholder["sampler_param_names"], pyholder["sampler_params"]):
            # keep only the post-warmup tail of each chain
            extraction[key].append(values[-chain_ndraws:])

    data = OrderedDict()
    for key, values in extraction.items():
        values = np.stack(values, axis=0)
        dtype = dtypes.get(key)
        # astype(None) intentionally yields float64 for stats not listed
        # in `dtypes` — do not guard this call.
        values = values.astype(dtype)
        name = re.sub("__$", "", key)
        # rename to match ArviZ conventions
        name = "diverging" if name == "divergent" else name
        data[name] = values

    # log_likelihood
    if log_likelihood is not None:
        log_likelihood_data = get_draws(fit, variables=log_likelihood)
        data["log_likelihood"] = log_likelihood_data[log_likelihood]

    # lp__
    stat_lp = get_draws(fit, variables="lp__")
    data["lp"] = stat_lp["lp__"]
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.