signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def yum_update(downloadonly=False, dest_dir='/tmp'):
    """Run a yum update on this system.

    Runs ``yum -y update`` to update packages from yum. If ``downloadonly``
    is True, the updates are only downloaded to ``dest_dir`` and not
    installed.

    :param downloadonly: (bool) Set True to download packages without installing
    :param dest_dir: (str) Full path to the download directory
    :return: (int) Exit code from the yum command
    :raises CommandError: if an argument has the wrong type, the download
        directory cannot be created, or the yum command fails
    """
    log = logging.getLogger(mod_logger + '.yum_update')
    # Type checks on the args
    if not isinstance(dest_dir, basestring):  # Python 2: accepts str and unicode
        msg = 'dest_dir argument must be a string'
        log.error(msg)
        raise CommandError(msg)
    if not isinstance(downloadonly, bool):
        msg = 'downloadonly argument must be a bool'
        log.error(msg)
        raise CommandError(msg)
    # If downloadonly was True, download packages to dest_dir
    if downloadonly:
        # Create the destination directory if it does not exist
        log.info('Creating directory: %s', dest_dir)
        try:
            mkdir_p(dest_dir)
        except OSError:
            _, ex, trace = sys.exc_info()
            msg = 'Unable to create destination directory: {d}'.format(d=dest_dir)
            log.error(msg)
            # Python 2 three-argument raise: re-raise with the original traceback
            raise CommandError, msg, trace
        # Build command string with downloadonly options specified
        command = ['yum', '-y', 'update', '--downloadonly', '--downloaddir={d}'.format(d=dest_dir)]
        log.info('Downloading updates from yum to %s...', dest_dir)
    else:
        # Build command string to update directly
        command = ['yum', '-y', 'update']
        log.info('Installing yum updates from RHN...')
    # Run the command
    try:
        result = run_command(command)
    except CommandError:
        raise
    log.info('Yum update completed and exit with code: {c}'.format(c=result['code']))
    return result['code']
|
def dataset_exists(dataset, data_home=None):
    """Check whether a directory named after the given dataset exists in
    the data home directory (found with ``get_data_home``).

    Parameters
    ----------
    dataset : str
        The name of the dataset; should either be a folder in data home or
        specified in the yellowbrick.datasets.DATASETS variable.
    data_home : str, optional
        The path on disk where data is stored. If not passed in, it is
        looked up from YELLOWBRICK_DATA or the default returned by
        ``get_data_home``.

    Returns
    -------
    exists : bool
        True if a folder with the dataset name is in the data home directory.
    """
    candidate = os.path.join(get_data_home(data_home), dataset)
    # isdir is False for nonexistent paths, so a separate exists() test
    # is not needed.
    return os.path.isdir(candidate)
|
def fetch_all(self, R, depth=1, **kwargs):
    """Request multiple objects from the API.

    Delegates to the underlying fetcher; raises whatever error the
    fetcher reports, otherwise returns the fetched data.
    """
    data, err = self._fetcher.fetch_all(R, depth, kwargs)
    if err:
        raise err
    return data
|
def to_bool(val):
    '''Return the logical value of ``val``.

    .. code-block:: jinja

        {{ 'yes' | to_bool }}

    will be rendered as:

    .. code-block:: text

        True
    '''
    # None is always falsy.
    if val is None:
        return False
    # Booleans pass through unchanged.
    if isinstance(val, bool):
        return val
    # Strings: only a small set of affirmative spellings count as True.
    if isinstance(val, (six.text_type, six.string_types)):
        return val.lower() in ('yes', '1', 'true')
    # Integers: any positive value counts as True.
    if isinstance(val, six.integer_types):
        return val > 0
    # Unhashable objects (lists, dicts, sets, ...) fall back to ordinary
    # truthiness.
    # NOTE(review): collections.Hashable was removed in Python 3.10
    # (moved to collections.abc.Hashable) — confirm target runtime.
    if not isinstance(val, collections.Hashable):
        return bool(val)
    # Any other hashable type (floats, tuples, arbitrary objects) is
    # treated as False.
    return False
|
def update_reminder_item(self, reminder_item_id, reminder_item_dict):
    """Update a reminder item.

    :param reminder_item_id: the reminder item id
    :param reminder_item_dict: dict of values to update
    :return: dict with the server response
    """
    return self._create_put_request(
        resource=REMINDER_ITEMS,
        billomat_id=reminder_item_id,
        send_data=reminder_item_dict,
    )
|
def branch(self):
    """Return the repository branch, falling back to the Travis branch
    when it cannot be determined from the repo itself."""
    current = get_branch(repo=self.repo)
    return get_travis_branch() if current is None else current
|
def escape(s, quote=False):
    """Replace the special characters ``&``, ``<`` and ``>`` with HTML-safe
    sequences. If the optional flag `quote` is `True`, the quotation mark
    character is also translated.

    There is a special handling for `None` which escapes to an empty string.

    :param s: the string to escape.
    :param quote: set to true to also escape double quotes.
    :return: the escaped string, or the result of ``s.__html__()`` when the
        object declares itself already HTML-safe.
    """
    if s is None:
        return ''
    # Objects implementing the __html__ protocol render themselves.
    if hasattr(s, '__html__'):
        return s.__html__()
    if not isinstance(s, (text_type, binary_type)):
        s = text_type(s)
    if isinstance(s, binary_type):
        try:
            s.decode('ascii')
        except UnicodeDecodeError:  # narrowed from a bare except
            s = s.decode('utf-8', 'replace')
    # BUG FIX: the replacement targets had been HTML-unescaped into
    # self-identical no-ops; restore the proper entities. '&' must be
    # replaced first so the other entities are not double-escaped.
    s = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
    if quote:
        s = s.replace('"', '&quot;')
    return s
|
def use_educated_guess(self):
    """Try to guess the proper library names, include and library paths
    if everything else failed.

    Prints a warning, gives the user up to 10 seconds (skippable by
    pressing Enter, only when ``self.wait`` is set) to abort, then
    installs the hard-coded LIBIGRAPH_FALLBACK_* values on this instance.
    """
    preprocess_fallback_config()
    global LIBIGRAPH_FALLBACK_LIBRARIES
    global LIBIGRAPH_FALLBACK_INCLUDE_DIRS
    global LIBIGRAPH_FALLBACK_LIBRARY_DIRS
    print("WARNING: we were not able to detect where igraph is installed on")
    print("your machine (if it is installed at all). We will use the fallback")
    print("library and include paths hardcoded in setup.py and hope that the")
    print("C core of igraph is installed there.")
    print("")
    print("If the compilation fails and you are sure that igraph is installed")
    print("on your machine, adjust the following two variables in setup.py")
    print("accordingly and try again:")
    print("")
    print("- LIBIGRAPH_FALLBACK_INCLUDE_DIRS", LIBIGRAPH_FALLBACK_INCLUDE_DIRS)
    print("- LIBIGRAPH_FALLBACK_LIBRARY_DIRS", LIBIGRAPH_FALLBACK_LIBRARY_DIRS)
    print("")
    # Countdown that the user can cut short by pressing Enter.
    seconds_remaining = 10 if self.wait else 0
    while seconds_remaining > 0:
        if seconds_remaining > 1:
            plural = "s"
        else:
            plural = ""
        sys.stdout.write("\rContinuing in %2d second%s; press Enter to continue " "immediately. " % (seconds_remaining, plural))
        sys.stdout.flush()
        if os.name == 'nt':
            # Windows: poll the console for a carriage return.
            if msvcrt.kbhit():
                if msvcrt.getch() == b'\r':  # not '\n'
                    break
            time.sleep(1)
        else:
            # POSIX: wait up to one second for stdin to become readable.
            rlist, _, _ = select([sys.stdin], [], [], 1)
            if rlist:
                sys.stdin.readline()
                break
        seconds_remaining -= 1
    # Blank out the countdown line.
    sys.stdout.write("\r" + " " * 65 + "\r")
    self.libraries = LIBIGRAPH_FALLBACK_LIBRARIES[:]
    if self.static_extension:
        # Static builds must also link igraph's own dependencies.
        self.libraries.extend(["xml2", "z", "m", "stdc++"])
    self.include_dirs = LIBIGRAPH_FALLBACK_INCLUDE_DIRS[:]
    self.library_dirs = LIBIGRAPH_FALLBACK_LIBRARY_DIRS[:]
|
def verify_url(url, secret_key, **kwargs):
    """Verify a signed URL (excluding the domain and scheme).

    :param url: URL to verify
    :param secret_key: Secret key
    :rtype: bool
    :raises: URLError
    """
    parsed = urlparse(url)
    args = MultiValueDict(parse_qs(parsed.query))
    return verify_url_path(parsed.path, args, secret_key, **kwargs)
|
def getItem(self, itemID):
    """Return the note JSON object with the given ID.

    Example document::

        {u'note': u'note8',
         u'ID': 3.0,
         u'tags': [u'8'],
         u'timestamps': [1381719620.315899]}

    :param int itemID: The item ID, an integer
    :returns: The matching note dict (with its collection recorded under
        the ``type`` key and the Mongo ``_id`` removed), or None when no
        collection contains a note with this ID.
    """
    itemID = scrubID(itemID)
    # BUG FIX: previously ``note`` was only bound inside the loop, so an
    # empty collection list caused a NameError at the return statement.
    note = None
    for coll in self.get_data_collections():
        note = self.noteDB[coll].find_one({"ID": itemID})
        if note is not None:
            del note["_id"]
            note['type'] = coll
            break
    return note
|
def _pfp__add_child(self, name, child, stream=None):
    """Add a child to the Union field.

    :name: The name of the child
    :child: A :class:`.Field` instance
    :stream: Optional source stream; when given, it is rewound by the
        union's size so subsequent members parse from the same offset.
    :returns: The resulting field
    """
    res = super(Union, self)._pfp__add_child(name, child)
    # Render the new child into the union's scratch buffer to measure the
    # union's serialized size (presumably the max over its members —
    # TODO(review) confirm against the buffer's lifecycle).
    self._pfp__buff.seek(0, 0)
    child._pfp__build(stream=self._pfp__buff)
    size = len(self._pfp__buff.getvalue())
    self._pfp__buff.seek(0, 0)
    if stream is not None:
        # Union members overlay each other: rewind the source stream so
        # the next member starts at the same position this one did.
        curr_pos = stream.tell()
        stream.seek(curr_pos - size, 0)
    return res
|
def get_context_data(self, **kwargs):
    """Allow adding a 'render_description' parameter."""
    context = super(ScheduleXmlView, self).get_context_data(**kwargs)
    # The flag is only honored when the query string carries exactly '1'.
    context['render_description'] = (
        self.request.GET.get('render_description', None) == '1'
    )
    return context
|
def movMF(
    X,
    n_clusters,
    posterior_type="soft",
    force_weights=None,
    n_init=10,
    n_jobs=1,
    max_iter=300,
    verbose=False,
    init="random-class",
    random_state=None,
    tol=1e-6,
    copy_x=True,
):
    """Wrapper for parallelization of _movMF, running it n_init times and
    returning the solution with the lowest inertia.

    :param X: array-like of samples to cluster on the sphere
    :param n_clusters: number of mixture components
    :param posterior_type: posterior assignment type passed to _movMF
    :param force_weights: fixed mixture weights, or None to estimate them
    :param n_init: number of random restarts
    :param n_jobs: number of parallel jobs (1 runs serially)
    :param max_iter: maximum iterations per run
    :param verbose: verbosity flag passed through to _movMF
    :param init: init strategy name, or an array of initial centers
    :param random_state: seed / RandomState for reproducibility
    :param tol: convergence tolerance (scaled by _tolerance)
    :param copy_x: whether to copy X when converting to float
    :return: (best_centers, best_labels, best_inertia, best_weights,
        best_concentrations, best_posterior)
    :raises ValueError: if n_init or max_iter is not positive
    """
    if n_init <= 0:
        raise ValueError(
            "Invalid number of initializations."
            " n_init=%d must be bigger than zero." % n_init
        )
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError(
            "Number of iterations should be a positive number,"
            " got %d instead" % max_iter
        )
    X = as_float_array(X, copy=copy_x)
    tol = _tolerance(X, tol)
    if hasattr(init, "__array__"):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            warnings.warn(
                "Explicit initial center position passed: "
                "performing only one init in k-means instead of n_init=%d" % n_init,
                RuntimeWarning,
                stacklevel=2,
            )
            n_init = 1
    # defaults
    best_centers = None
    best_labels = None
    best_weights = None
    best_concentrations = None
    best_posterior = None
    best_inertia = None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one
        # set of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # cluster on the sphere
            (centers, weights, concentrations, posterior, labels, inertia) = _movMF(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                random_state=random_state,
                tol=tol,
            )
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_centers = centers.copy()
                best_labels = labels.copy()
                best_weights = weights.copy()
                best_concentrations = concentrations.copy()
                best_posterior = posterior.copy()
                best_inertia = inertia
    else:
        # parallelisation of movMF runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_movMF)(
                X,
                n_clusters,
                posterior_type=posterior_type,
                force_weights=force_weights,
                max_iter=max_iter,
                verbose=verbose,
                init=init,
                # BUG FIX: each run must use its own seed. Previously the
                # shared random_state was passed while ``seed`` was unused,
                # so all parallel restarts were identical.
                random_state=seed,
                tol=tol,
            )
            for seed in seeds
        )
        # Get results with the lowest inertia
        centers, weights, concentrations, posteriors, labels, inertia = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_concentrations = concentrations[best]
        best_posterior = posteriors[best]
        best_weights = weights[best]
    return (
        best_centers,
        best_labels,
        best_inertia,
        best_weights,
        best_concentrations,
        best_posterior,
    )
|
def _check_job_status(self, job, desc, status_key_name):
    """Check whether the job completed successfully and, if not, construct
    and raise a ValueError.

    Args:
        job (str): The name of the job to check.
        desc (dict[str, str]): The result of ``describe_training_job()``.
        status_key_name (str): Status key name to check for.

    Raises:
        ValueError: If the job ended in any state other than Completed or
            Stopped.
    """
    raw_status = desc[status_key_name]
    # Normalize capital-case statuses to camel case when a mapping exists.
    status = _STATUS_CODE_TABLE.get(raw_status, raw_status)
    if status in ('Completed', 'Stopped'):
        return
    reason = desc.get('FailureReason', '(No reason provided)')
    job_type = status_key_name.replace('JobStatus', ' job')
    raise ValueError('Error for {} {}: {} Reason: {}'.format(job_type, job, status, reason))
|
def _UpdateCampaignDSASetting(client, campaign_id, feed_id):
    """Updates the campaign DSA setting to DSA pagefeeds.

    Args:
        client: an AdWordsClient instance.
        campaign_id: a str Campaign ID.
        feed_id: a str page Feed ID.

    Raises:
        ValueError: If the given campaign is found not to be a dynamic
            search ad campaign.
    """
    # Get the CampaignService.
    campaign_service = client.GetService('CampaignService', version='v201809')
    selector = {'fields': ['Id', 'Settings'], 'predicates': [{'field': 'Id', 'operator': 'EQUALS', 'values': [campaign_id]}]}
    response = campaign_service.get(selector)
    if response['totalNumEntries']:
        campaign = response['entries'][0]
    else:
        # NOTE(review): campaign_id is documented as a str but formatted
        # with %d here and below — confirm the actual type.
        raise ValueError('No campaign with ID "%d" exists.' % campaign_id)
    if not campaign['settings']:
        raise ValueError('This is not a DSA campaign.')
    dsa_setting = None
    campaign_settings = campaign['settings']
    # Locate the DynamicSearchAdsSetting among the campaign's settings.
    for setting in campaign_settings:
        if setting['Setting.Type'] == 'DynamicSearchAdsSetting':
            dsa_setting = setting
            break
    if dsa_setting is None:
        raise ValueError('This is not a DSA campaign.')
    dsa_setting['pageFeed'] = {'feedIds': [feed_id]}
    # Optional: Specify whether only the supplied URLs should be used with your
    # Dynamic Search Ads.
    dsa_setting['useSuppliedUrlsOnly'] = True
    operation = {'operand': {'id': campaign_id, 'settings': campaign_settings}, 'operator': 'SET'}
    campaign_service.mutate([operation])
    # Python 2 print statement: this module targets Python 2.
    print 'DSA page feed for campaign ID "%d" was updated with feed ID "%d".' % (campaign_id, feed_id)
|
def calculated_intervals(self):
    """Get the calculated intervals, loading them from the database when
    they are not cached on this instance.

    :return: The calculated intervals
    """
    cached = self._calculated_intervals
    if cached is not None:
        return cached
    logging.debug("get calculated intervals")
    self.load()
    return self.mongo_model.get_calculated_intervals()
|
def browse_home_listpage_url(self, state=None, county=None, zipcode=None, street=None, **kwargs):
    """Construct the URL of a home list page from state, county, zipcode
    and street parts, skipping any part that is not given.

    Example:

    - https://www.zillow.com/browse/homes/ca/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/
    - https://www.zillow.com/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/
    """
    parts = [self.domain_browse_homes]
    parts.extend("%s" % piece for piece in (state, county, zipcode, street) if piece)
    return "/".join(parts) + "/"
|
def get_current_snapshot_name(vm):
    """Return the name of the VM's current snapshot.

    :param vm: Virtual machine whose current snapshot name is wanted
    :return: Snapshot name, or None when there is no current snapshot
    :rtype: str
    """
    all_snapshots = SnapshotRetriever.get_vm_snapshots(vm)
    # noinspection PyProtectedMember
    if not vm.snapshot:
        return None
    current_id = vm.snapshot.currentSnapshot._moId
    # noinspection PyProtectedMember
    matches = (name for name in all_snapshots.keys()
               if all_snapshots[name]._moId == current_id)
    return next(matches, None)
|
def cli(env, context_id):
    """Request configuration of a tunnel context.

    This action will update the advancedConfigurationFlag on the context
    instance and further modifications against the context will be
    prevented until all changes can be propagated to network devices.
    """
    manager = SoftLayer.IPSECManager(env.client)
    # Ensure the context can be retrieved by the given id (raises if not).
    manager.get_tunnel_context(context_id)
    if not manager.apply_configuration(context_id):
        raise CLIHalt('Failed to enqueue configuration request for context #{}'.format(context_id))
    env.out('Configuration request received for context #{}'.format(context_id))
|
def build_latex(hyp):
    """Join the LaTeX tokens of a recognition hypothesis into one string.

    Parameters
    ----------
    hyp : dict
        {'segmentation': [[0, 3], [1, 2]],
         'symbols': [{'symbol': ID, 'probability': 0.12}],
         'geometry': {'symbol': index,
                      'bottom': None or dict,
                      'subscript': None or dict,
                      'right': None or dict,
                      'superscript': None or dict,
                      'top': None or dict},
         'probability': 0.123}

    Each symbol ID has the form ``<prefix>;<latex>``; only the LaTeX part
    is kept.
    """
    return " ".join(sym['symbol'].split(";")[1] for sym in hyp['symbols'])
|
def chars(string: object) -> str:
    """Return all (and only) the alphabetic chars in the given string.

    Non-string input is converted with ``str()`` first.

    Note: the annotation previously used the builtin ``any`` — a function,
    not a type; ``object`` states the "accepts anything" intent correctly.
    """
    return ''.join(c for c in str(string) if c.isalpha())
|
def _parse_ident ( self ) :
"""Parse an identifier and return it ( possibly an empty string ) .
Updates ` ` pos ` ` ."""
|
remainder = self . string [ self . pos : ]
ident = re . match ( r'\w*' , remainder ) . group ( 0 )
self . pos += len ( ident )
return ident
|
def load_SampleData() -> Tuple[pandas.DataFrame, pandas.DataFrame]:
    '''Load sample data for quickly starting a demo run.

    Returns
    -------
    df_state_init, df_forcing : Tuple[pandas.DataFrame, pandas.DataFrame]
        - df_state_init: `initial model states <df_state_var>`
        - df_forcing: `forcing data <df_forcing_var>`

    Examples
    --------
    >>> df_state_init, df_forcing = supy.load_SampleData()
    '''
    path_runcontrol = Path(path_supy_module) / 'sample_run' / 'RunControl.nml'
    df_state_init = init_supy(path_runcontrol)
    grid = df_state_init.index[0]
    df_forcing = load_forcing_grid(path_runcontrol, grid)
    return df_state_init, df_forcing
|
def Get(self, key):
    """Get a network by providing its name, ID, or CIDR.

    If the key is not unique and finds multiple matches, only the first
    will be returned.

    :param key: value compared against each network's id, name and cidr
    :return: the first matching network, or None when nothing matches
    """
    for network in self.networks:
        # Narrowed from a bare except: only skip records that are
        # missing one of the compared attributes.
        try:
            if network.id == key:
                return network
            if network.name == key:
                return network
            if network.cidr == key:
                return network
        except AttributeError:
            # We ignore malformed records with missing attributes.
            pass
    # Explicit (previously implicit) not-found result.
    return None
|
def set_details(self, details, **kwargs):
    """Set the details for the remote object to the specified value.

    If the input contains the string ``"$dnanexus_link"`` as a key in a
    hash, it must be the only key in the hash, and its value must be a
    valid ID of an existing object.

    :param details: Details to set for the object
    :type details: dict or list
    :raises: :class:`~dxpy.exceptions.DXAPIError` if the object is not in
        the "open" state
    """
    return self._set_details(self._dxid, details, **kwargs)
|
def _gather_exposed_methods ( self ) :
"""Searches for the exposed methods in the current microservice class . A method is considered
exposed if it is decorated with the : py : func : ` gemstone . public _ method ` or
: py : func : ` gemstone . private _ api _ method ` ."""
|
self . _extract_methods_from_container ( self )
for module in self . modules :
self . _extract_methods_from_container ( module )
|
def ParseChat(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a chat message.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        query (str): query that created the row.
        row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    participants = self._GetRowValue(query_hash, row, 'participants')
    author = self._GetRowValue(query_hash, row, 'author')
    dialog_partner = self._GetRowValue(query_hash, row, 'dialog_partner')
    from_displayname = self._GetRowValue(query_hash, row, 'from_displayname')
    # Recipients are every participant except the author.
    # NOTE(review): assumes 'participants' is a non-None space-separated
    # string — confirm against the Skype database schema.
    accounts = []
    participants = participants.split(' ')
    for participant in participants:
        if participant != author:
            accounts.append(participant)
    to_account = ', '.join(accounts)
    if not to_account:
        # Fall back to the dialog partner when no other participant was listed.
        to_account = dialog_partner or 'Unknown User'
    from_account = '{0:s} <{1:s}>'.format(from_displayname, author)
    event_data = SkypeChatEventData()
    event_data.from_account = from_account
    event_data.query = query
    event_data.text = self._GetRowValue(query_hash, row, 'body_xml')
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.to_account = to_account
    timestamp = self._GetRowValue(query_hash, row, 'timestamp')
    # Only rows carrying a timestamp produce an event.
    if timestamp:
        date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
        event = time_events.DateTimeValuesEvent(date_time, 'Chat from Skype')
        parser_mediator.ProduceEventWithEventData(event, event_data)
|
def state(self, filter_nodes=None, filter_routing_table=None, filter_metadata=None, filter_blocks=None, filter_indices=None):
    """Retrieve the :ref:`cluster state <es-guide-reference-api-admin-cluster-state>`.

    :param filter_nodes: set to **true** to filter out the **nodes** part
        of the response.
    :param filter_routing_table: set to **true** to filter out the
        **routing_table** part of the response.
    :param filter_metadata: set to **true** to filter out the **metadata**
        part of the response.
    :param filter_blocks: set to **true** to filter out the **blocks**
        part of the response.
    :param filter_indices: when not filtering metadata, a comma separated
        list of indices to include in the response.
    """
    path = make_path("_cluster", "state")
    parameters = {}
    if filter_nodes is not None:
        parameters['filter_nodes'] = filter_nodes
    if filter_routing_table is not None:
        parameters['filter_routing_table'] = filter_routing_table
    if filter_metadata is not None:
        parameters['filter_metadata'] = filter_metadata
    if filter_blocks is not None:
        parameters['filter_blocks'] = filter_blocks
    # BUG FIX: this condition previously re-tested filter_blocks, so
    # filter_indices was only honored when filter_blocks was set (and
    # crashed on a None filter_indices when it was).
    if filter_indices is not None:
        if isinstance(filter_indices, six.string_types):
            parameters['filter_indices'] = filter_indices
        else:
            # Accept any iterable of index names.
            parameters['filter_indices'] = ",".join(filter_indices)
    return self.conn._send_request('GET', path, params=parameters)
|
def admin_tools_render_dashboard_module(context, module):
    """Template tag that renders a given dashboard module; takes a
    ``DashboardModule`` instance as first parameter."""
    module.init_with_context(context)
    admin_url = reverse('%s:index' % get_admin_site_name(context))
    context.update({
        'template': module.template,
        'module': module,
        'admin_url': admin_url,
    })
    return context
|
def listFigures(self, walkTrace=tuple(), case=None, element=None):
    """List section figures."""
    if case == 'sectionmain':
        print(walkTrace, self.title)
    elif case == 'figure':
        caption, fig = element
        # Assign a figure reference number the first time the figure is seen.
        if not hasattr(fig, '_leopardref'):
            fig._leopardref = next(self._reportSection._fignr)
        print(walkTrace, fig._leopardref, caption)
|
def decorator(caller, _func=None):
    """decorator(caller) converts a caller function into a decorator"""
    if _func is not None:
        # return a decorated function
        # this is obsolete behavior; you should use decorate instead
        return decorate(_func, caller)
    # else return a decorator function
    defaultargs, defaults = '', ()
    if inspect.isclass(caller):
        # Class caller: instances of the class act as decorators.
        name = caller.__name__.lower()
        doc = 'decorator(%s) converts functions/generators into ' 'factories of %s objects' % (caller.__name__, caller.__name__)
    elif inspect.isfunction(caller):
        if caller.__name__ == '<lambda>':
            name = '_lambda_'
        else:
            name = caller.__name__
        doc = caller.__doc__
        # Copy the caller's trailing default arguments into the signature
        # of the generated decorator so they stay overridable.
        nargs = caller.__code__.co_argcount
        ndefs = len(caller.__defaults__ or ())
        defaultargs = ', '.join(caller.__code__.co_varnames[nargs - ndefs:nargs])
        if defaultargs:
            defaultargs += ','
        defaults = caller.__defaults__
    else:
        # assume caller is an object with a __call__ method
        name = caller.__class__.__name__.lower()
        doc = caller.__call__.__doc__
    evaldict = dict(_call=caller, _decorate_=decorate)
    # Compile the decorator dynamically so its signature mirrors the
    # caller's defaults; FunctionMaker evaluates the source in evaldict.
    dec = FunctionMaker.create('%s(func, %s)' % (name, defaultargs), 'if func is None: return lambda func: _decorate_(func, _call, (%s))\n' 'return _decorate_(func, _call, (%s))' % (defaultargs, defaultargs), evaldict, doc=doc, module=caller.__module__, __wrapped__=caller)
    if defaults:
        # The leading None matches the generated ``func`` parameter.
        dec.__defaults__ = (None,) + defaults
    return dec
|
def update_config(updated_project):
    '''Update a project in the transfer configuration.

    Args:
        updated_project (dict): Updated project configuration values;
            matched against existing projects by its 'name' key.
    '''
    home = os.path.expanduser('~')
    if os.path.isfile(os.path.join(home, '.transfer', 'config.yaml')):
        with open(os.path.join(home, '.transfer', 'config.yaml'), 'r') as fp:
            # NOTE(review): yaml.load without an explicit Loader is
            # deprecated and unsafe on untrusted input — consider
            # yaml.safe_load here.
            projects = yaml.load(fp.read())
        # Find the index of the project with the matching name (last match
        # wins if names are duplicated).
        replace_index = -1
        for i, project in enumerate(projects):
            if project['name'] == updated_project['name']:
                replace_index = i
        if replace_index > -1:
            projects[replace_index] = updated_project
            store_config(projects)
        else:
            print('Not saving configuration')
            print(colored('Project: ' + updated_project['name'] + ' was not found in configured projects!', 'red'))
    else:
        print('Transfer is not configured.')
        print('Please run:')
        print('')
        print(colored(' transfer --configure', 'cyan'))
        return
|
def plot(self, n=500, eigenvalues=None, sum=None, title=None, ax=None, **kwargs):
    r"""Docstring overloaded at import time."""
    # Imported lazily so the plotting backend is only required when
    # plotting is actually requested.
    from pygsp.plotting import _plot_filter
    # NOTE(review): the ``sum`` parameter shadows the builtin, but
    # renaming it would break keyword callers.
    return _plot_filter(self, n=n, eigenvalues=eigenvalues, sum=sum, title=title, ax=ax, **kwargs)
|
def build_image(image_path, image_name, build_args=None, dockerfile_path=None):
    """Build a docker image.

    Args:
        image_path (str): the path to the image directory
        image_name (str): image 'name:tag' to build
        build_args (dict, optional): dict of docker build arguments
        dockerfile_path (str, optional): path to dockerfile relative to
            image_path if not `image_path/Dockerfile`.
    """
    cmd = ['docker', 'build', '-t', image_name, image_path]
    if dockerfile_path:
        cmd += ['-f', dockerfile_path]
    for key, value in (build_args or {}).items():
        cmd.extend(['--build-arg', '{}={}'.format(key, value)])
    check_call(cmd)
|
def get_cursor_vertical_diff(self):
    """Returns how far down the cursor moved since last render.

    Note:
        If another get_cursor_vertical_diff call is already in progress,
        immediately returns zero. (This situation is likely if
        get_cursor_vertical_diff is called from a SIGWINCH signal
        handler, since sigwinches can happen in rapid succession and
        terminal emulators seem not to respond to cursor position
        queries before the next sigwinch occurs.)
    """
    # Probably called by a SIGWINCH handler, and therefore
    # will do cursor querying until a SIGWINCH doesn't happen during
    # the query. Calls to the function from a signal handler COULD STILL
    # HAPPEN out of order -
    # they just can't interrupt the actual cursor query.
    if self.in_get_cursor_diff:
        # Re-entered (nested SIGWINCH): record it and bail; the in-flight
        # call will notice the flag and re-query.
        self.another_sigwinch = True
        return 0
    cursor_dy = 0
    while True:
        self.in_get_cursor_diff = True
        self.another_sigwinch = False
        cursor_dy += self._get_cursor_vertical_diff_once()
        self.in_get_cursor_diff = False
        # If a SIGWINCH arrived during the query, accumulate another pass;
        # otherwise the total is final.
        if not self.another_sigwinch:
            return cursor_dy
|
def _parse_years ( years ) :
"""Parse string of ints include ranges into a ` list ` of ` int `
Source : https : / / stackoverflow . com / a / 6405228/1307974"""
|
result = [ ]
for part in years . split ( ',' ) :
if '-' in part :
a , b = part . split ( '-' )
a , b = int ( a ) , int ( b )
result . extend ( range ( a , b + 1 ) )
else :
a = int ( part )
result . append ( a )
return result
|
def parse_atom_site(self, name, attributes):
    '''Parse the atom tag attributes. Most atom tags do not have attributes.'''
    is_nil = attributes.get('xsi:nil') == 'true'
    if name == "PDBx:pdbx_PDB_ins_code":
        # The null flag must not already be set for this atom record.
        assert not self.current_atom_site.ATOMResidueiCodeIsNull
        if is_nil:
            self.current_atom_site.ATOMResidueiCodeIsNull = True
    elif name == "PDBx:auth_asym_id":
        assert not self.current_atom_site.PDBChainIDIsNull
        if is_nil:
            self.current_atom_site.PDBChainIDIsNull = True
|
def get_size(conn, vm_):
    '''Return the VM's size object.

    :param conn: cloud driver connection used to list available sizes
    :param vm_: VM configuration; its 'size' value selects the size
    :return: the first available size when none is configured, otherwise
        the id of the matching size
    :raises SaltCloudNotFound: when the configured size does not match
        any size offered by the provider
    '''
    sizes = conn.list_sizes()
    vm_size = config.get_cloud_config_value('size', vm_, __opts__)
    if not vm_size:
        # No size configured: default to the first size offered.
        return sizes[0]
    for size in sizes:
        # NOTE(review): ``sizes[size]`` indexes the collection by its own
        # element, which only works if list_sizes returns a mapping keyed
        # by size objects — confirm against the driver; with a plain list
        # this would raise TypeError.
        if vm_size and six.text_type(vm_size) in (six.text_type(sizes[size]['id']), six.text_type(size)):
            return sizes[size]['id']
    raise SaltCloudNotFound('The specified size, \'{0}\', could not be found.'.format(vm_size))
|
def absl_to_cpp(level):
    """Converts an absl log level to a cpp log level.

    Args:
        level: int, an absl.logging level.

    Raises:
        TypeError: Raised when level is not an integer.

    Returns:
        The corresponding integer level for use in Abseil C++.
    """
    if not isinstance(level, int):
        raise TypeError('Expect an int level, found {}'.format(type(level)))
    # C++ log levels must be >= 0: negative absl (debug) levels map to
    # their positive counterparts, everything else maps to 0.
    return -level if level < 0 else 0
|
def batch_get_assets_history(
    self,
    parent,
    content_type,
    read_time_window,
    asset_names=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Batch gets the update history of assets that overlap a time window.

    For RESOURCE content, this API outputs history with the asset in both
    non-delete and deleted status. For IAM_POLICY content, this API outputs
    history when the asset and its attached IAM POLICY both exist. This can
    create gaps in the output history. If a specified asset does not exist,
    this API returns an INVALID_ARGUMENT error.

    Example:
        >>> from google.cloud import asset_v1
        >>> from google.cloud.asset_v1 import enums
        >>> client = asset_v1.AssetServiceClient()
        >>> # TODO: Initialize `parent`:
        >>> parent = ''
        >>> # TODO: Initialize `content_type`:
        >>> content_type = enums.ContentType.CONTENT_TYPE_UNSPECIFIED
        >>> # TODO: Initialize `read_time_window`:
        >>> read_time_window = {}
        >>> response = client.batch_get_assets_history(parent, content_type, read_time_window)

    Args:
        parent (str): Required. The relative name of the root asset. It can
            only be an organization number (such as "organizations/123"), a
            project ID (such as "projects/my-project-id"), or a project
            number (such as "projects/12345").
        content_type (~google.cloud.asset_v1.types.ContentType): Required.
            The content type.
        read_time_window (Union[dict, ~google.cloud.asset_v1.types.TimeWindow]):
            Optional. The time window for the asset history. Both start_time
            and end_time are optional and, if set, must be after 2018-10-02
            UTC. If end_time is not set, it defaults to the current
            timestamp. If start_time is not set, the snapshot of the assets
            at end_time is returned. The returned results contain all
            temporal assets whose time window overlaps read_time_window.
            If a dict is provided, it must be of the same form as the
            protobuf message :class:`~google.cloud.asset_v1.types.TimeWindow`.
        asset_names (list[str]): A list of the full names of the assets, for
            example:
            ``//compute.googleapis.com/projects/my_project_123/zones/zone1/instances/instance1``.
            See `Resource Names
            <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__
            for more info. The request becomes a no-op if the asset name
            list is empty; the max size of the list is 100 per request.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.cloud.asset_v1.types.BatchGetAssetsHistoryResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Wrap the transport method to add retry and timeout logic; the
    # wrapped call is cached so wrapping happens only on the first call.
    if "batch_get_assets_history" not in self._inner_api_calls:
        self._inner_api_calls["batch_get_assets_history"] = google.api_core.gapic_v1.method.wrap_method(self.transport.batch_get_assets_history, default_retry=self._method_configs["BatchGetAssetsHistory"].retry, default_timeout=self._method_configs["BatchGetAssetsHistory"].timeout, client_info=self._client_info, )
    request = asset_service_pb2.BatchGetAssetsHistoryRequest(parent=parent, content_type=content_type, read_time_window=read_time_window, asset_names=asset_names, )
    return self._inner_api_calls["batch_get_assets_history"](request, retry=retry, timeout=timeout, metadata=metadata)
|
def train(epochs, ctx):
    """Train the embedding network for ``epochs`` epochs on ``ctx``.

    Relies on module-level state: ``net``, ``opt`` (parsed CLI options),
    ``beta`` (class-specific margin parameters), ``train_data``, ``steps``,
    ``get_lr`` and ``test``.  Saves the best parameters to
    ``<opt.save_model_prefix>.params`` and returns the best value of the
    first validation metric reported by ``test``.
    """
    # Normalise a single context into a list so the multi-GPU code paths work.
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    net.initialize(mx.init.Xavier(magnitude=2), ctx=ctx)
    opt_options = {'learning_rate': opt.lr, 'wd': opt.wd}
    if opt.optimizer == 'sgd':
        opt_options['momentum'] = 0.9
    if opt.optimizer == 'adam':
        opt_options['epsilon'] = 1e-7
    trainer = gluon.Trainer(net.collect_params(), opt.optimizer,
                            opt_options,
                            kvstore=opt.kvstore)
    if opt.lr_beta > 0.0:
        # Jointly train class-specific beta.
        # See "sampling matters in deep embedding learning" paper for details.
        beta.initialize(mx.init.Constant(opt.beta), ctx=ctx)
        trainer_beta = gluon.Trainer([beta], 'sgd',
                                     {'learning_rate': opt.lr_beta, 'momentum': 0.9},
                                     kvstore=opt.kvstore)
    loss = MarginLoss(margin=opt.margin, nu=opt.nu)

    best_val = 0.0
    for epoch in range(epochs):
        tic = time.time()
        prev_loss, cumulative_loss = 0.0, 0.0

        # Learning rate schedule.
        trainer.set_learning_rate(get_lr(opt.lr, epoch, steps, opt.factor))
        logging.info('Epoch %d learning rate=%f', epoch, trainer.learning_rate)
        if opt.lr_beta > 0.0:
            trainer_beta.set_learning_rate(get_lr(opt.lr_beta, epoch, steps, opt.factor))
            logging.info('Epoch %d beta learning rate=%f', epoch, trainer_beta.learning_rate)

        # Inner training loop.  NOTE(review): the epoch length is hard-coded
        # to 200 iterations -- confirm this matches the sampler's epoch size.
        for i in range(200):
            batch = train_data.next()
            data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)

            Ls = []
            with ag.record():
                for x, y in zip(data, label):
                    a_indices, anchors, positives, negatives, _ = net(x)

                    if opt.lr_beta > 0.0:
                        L = loss(anchors, positives, negatives, beta, y[a_indices])
                    else:
                        L = loss(anchors, positives, negatives, opt.beta, None)

                    # Store the loss and do backward after we have done forward
                    # on all GPUs for better speed on multiple GPUs.
                    Ls.append(L)
                    cumulative_loss += nd.mean(L).asscalar()

                for L in Ls:
                    L.backward()

            # Update.
            trainer.step(batch.data[0].shape[0])
            if opt.lr_beta > 0.0:
                trainer_beta.step(batch.data[0].shape[0])

            if (i + 1) % opt.log_interval == 0:
                # Log the loss accumulated since the previous log line.
                logging.info('[Epoch %d, Iter %d] training loss=%f' % (
                    epoch, i + 1, cumulative_loss - prev_loss))
                prev_loss = cumulative_loss

        logging.info('[Epoch %d] training loss=%f' % (epoch, cumulative_loss))
        logging.info('[Epoch %d] time cost: %f' % (epoch, time.time() - tic))

        names, val_accs = test(ctx)
        for name, val_acc in zip(names, val_accs):
            logging.info('[Epoch %d] validation: %s=%f' % (epoch, name, val_acc))

        # Checkpoint on the first validation metric only.
        if val_accs[0] > best_val:
            best_val = val_accs[0]
            logging.info('Saving %s.' % opt.save_model_prefix)
            net.save_parameters('%s.params' % opt.save_model_prefix)
    return best_val
|
def task_failure_handler(task_id=None, exception=None, traceback=None, args=None, **kwargs):
    """Report a failed task and remove its working directory.

    ``args[1]`` is expected to be the task's keyword-argument dict holding
    ``archive_path``, ``image`` and ``workdir``.
    """
    # TODO: find a better way to access workdir/archive/image
    task_kwargs = args[1]
    report = {
        'task_id': task_id,
        'exception': exception,
        'traceback': traceback,
        'archive': task_kwargs['archive_path'],
        'image': task_kwargs['image'],
    }
    notifier.send_task_failure_report(report)
    remove_file(task_kwargs['workdir'])
|
def update_hpx_skymap_allsky(map_in, map_out):
    """'Update' a HEALPix skymap.

    If ``map_out`` is None, create a new all-sky ``HpxMap`` from the
    expanded counts map of ``map_in``; otherwise add the expanded counts
    of ``map_in`` into ``map_out`` in place.

    Parameters
    ----------
    map_in : HpxMap
        Input map whose expanded counts are accumulated.
    map_out : HpxMap or None
        Accumulator map, or None on the first call.

    Returns
    -------
    HpxMap
        The accumulator (newly created when ``map_out`` was None).
    """
    if map_out is None:
        in_hpx = map_in.hpx
        # Build an all-sky geometry matching the input map's binning.
        out_hpx = HPX.create_hpx(in_hpx.nside, in_hpx.nest, in_hpx.coordsys,
                                 None, in_hpx.ebins, None, in_hpx.conv, None)
        data_out = map_in.expanded_counts_map()
        # Removed leftover debug print of (shape, sum).
        map_out = HpxMap(data_out, out_hpx)
    else:
        map_out.data += map_in.expanded_counts_map()
    return map_out
|
def parser_factory(fake_args=None):
    """Build the aomi argument parser and parse the command line.

    :param fake_args: optional list of argument strings to parse instead of
        ``sys.argv`` (useful in tests)
    :return: tuple of (parser, parsed argparse namespace)
    """
    parser = ArgumentParser(description='aomi')
    subparsers = parser.add_subparsers(dest='operation',
                                       help='Specify the data  or extraction operation')
    # Each helper registers one sub-command on the shared subparser handle.
    for register_subcommand in (extract_file_args, environment_args,
                                aws_env_args, seed_args, render_args,
                                diff_args, freeze_args, thaw_args,
                                template_args, password_args, token_args,
                                help_args, export_args):
        register_subcommand(subparsers)
    if fake_args is None:
        return parser, parser.parse_args()
    return parser, parser.parse_args(fake_args)
|
def applyEdits(self, addFeatures=None, updateFeatures=None, deleteFeatures=None,
               gdbVersion=None, rollbackOnFailure=True):
    """Add, update and delete features on the associated feature layer or
    table in a single call.

    Inputs:
        addFeatures - array of common.general.Feature objects to add, or a
            common.general.FeatureSet object.
        updateFeatures - array of common.Feature objects to update.
        deleteFeatures - string of OIDs to remove from service.
        gdbVersion - geodatabase version to apply the edits.
        rollbackOnFailure - apply the edits only if all submitted edits
            succeed (True, the default); if False the server applies the
            edits that succeed even if some fail.
    Output:
        dictionary of messages
    """
    # Bug fix: mutable list defaults replaced with None sentinels
    # (behaviour for callers is unchanged).
    if addFeatures is None:
        addFeatures = []
    if updateFeatures is None:
        updateFeatures = []
    editURL = self._url + "/applyEdits"
    params = {"f": "json", 'rollbackOnFailure': rollbackOnFailure}
    if gdbVersion is not None:
        params['gdbVersion'] = gdbVersion
    if len(addFeatures) > 0 and isinstance(addFeatures[0], Feature):
        params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
                                    default=_date_handler)
    elif isinstance(addFeatures, FeatureSet):
        params['adds'] = json.dumps([f.asDictionary for f in addFeatures],
                                    default=_date_handler)
    if len(updateFeatures) > 0 and isinstance(updateFeatures[0], Feature):
        params['updates'] = json.dumps([f.asDictionary for f in updateFeatures],
                                       default=_date_handler)
    if deleteFeatures is not None and isinstance(deleteFeatures, str):
        params['deletes'] = deleteFeatures
    return self._post(url=editURL, param_dict=params,
                      securityHandler=self._securityHandler,
                      proxy_port=self._proxy_port,
                      proxy_url=self._proxy_url)
|
def _variant_po_to_dict(tokens) -> CentralDogma:
    """Convert a PyParsing result into a central dogma abundance
    (i.e., Protein, RNA, miRNA, Gene).

    :type tokens: ParseResult
    :raises ValueError: if the FUNCTION entry does not map to a DSL class
    """
    dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
    if dsl is None:
        raise ValueError('invalid tokens: {}'.format(tokens))
    variants = [_variant_to_dsl_helper(variant) for variant in tokens[VARIANTS]]
    return dsl(namespace=tokens[NAMESPACE], name=tokens[NAME], variants=variants)
|
def unregister(self, signal):
    """Unregister an existing signal.

    :param signal: Name of the signal

    Unknown signal names are ignored (only a debug message is logged).
    """
    if signal in self.signals:
        del self.signals[signal]
        self.__log.debug("Signal %s unregisterd" % signal)
    else:
        # Bug fix: the original omitted the format argument, so the log
        # line contained a literal '%s'.
        self.__log.debug("Signal %s does not exist and could not be unregistered." % signal)
|
def refresh_token(self):
    """Exchange the stored refresh token for a fresh access token.

    Stores the full OAuth response on ``self.token`` and caches the new
    ``access_token`` on the instance.
    """
    new_token = self.oauth.refresh_token(self.access_token_url,
                                         refresh_token=self.get_refresh_token())
    self.token = new_token
    self.access_token = new_token.get("access_token")
|
def _variable(lexer):
    """Return a variable expression.

    Grammar: ``NAMES ( '[' indices ']' )?``
    """
    names = _names(lexer)
    lookahead = next(lexer)
    if isinstance(lookahead, LBRACK):
        # Subscripted form: NAMES '[' ... ']'
        indices = _indices(lexer)
        _expect_token(lexer, {RBRACK})
    else:
        # Plain NAMES: push the lookahead token back for the caller.
        lexer.unpop_token(lookahead)
        indices = ()
    return ('var', names, indices)
|
def get_exceptions(self):
    """Return the class names of the exception types this method declares.

    Reads the method's "Exceptions" attribute; returns an empty tuple when
    the attribute is absent.

    reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.5
    """
    # noqa
    buff = self.get_attribute("Exceptions")
    if buff is None:
        return tuple()
    with unpack(buff) as up:
        entries = up.unpack_struct_array(_H)
        return tuple(self.deref_const(entry[0]) for entry in entries)
|
def down(removekeys=False, tgt='*', tgt_type='glob', timeout=None, gather_job_timeout=None):
    '''
    .. versionchanged:: 2017.7.0
        The ``expr_form`` argument has been renamed to ``tgt_type``, earlier
        releases must use ``expr_form``.

    Print a list of all the down or unresponsive salt minions
    Optionally remove keys of down minions

    CLI Example:

    .. code-block:: bash

        salt-run manage.down
        salt-run manage.down removekeys=True
        salt-run manage.down tgt="webservers" tgt_type="nodegroup"
    '''
    ret = status(output=False, tgt=tgt, tgt_type=tgt_type, timeout=timeout,
                 gather_job_timeout=gather_job_timeout).get('down', [])
    if removekeys and ret:
        # Construct the wheel client once; the original rebuilt it inside
        # the loop for every down minion.
        wheel = salt.wheel.Wheel(__opts__)
        for minion in ret:
            wheel.call_func('key.delete', match=minion)
    return ret
|
def text_lines_from_local_file(document, remote=False):
    """Return the fulltext of a local file as a list of lines.

    @param document: full path to the file that should be read
    @param remote: accepted for interface compatibility; not consulted here
    @return: list of lines containing at least one word, or [] on error
    """
    try:
        if is_pdf(document):
            if not executable_exists("pdftotext"):
                # Bug fix: previously this only logged and then invoked
                # pdftotext anyway; bail out instead.
                current_app.logger.error("pdftotext is not available on the system.")
                return []
            # (Removed a dead, shell-escaped command string that was built
            # but never used.)
            out = subprocess.Popen(["pdftotext", "-q", "-enc", "UTF-8", document, "-"],
                                   universal_newlines=True,
                                   stdout=subprocess.PIPE)
            (stdoutdata, stderrdata) = out.communicate()
            lines = stdoutdata.splitlines()
            if not isinstance(stdoutdata, six.text_type):
                # We are in Python 2. We need to cast to unicode
                lines = [line.decode('utf8', 'replace') for line in lines]
        else:
            # FIXME - we assume it is utf-8 encoded / that is not good
            # ``with`` guarantees the handle is closed even on a read error.
            with codecs.open(document, "r", encoding="utf8", errors="replace") as filestream:
                lines = filestream.readlines()
    except IOError as ex1:
        current_app.logger.error("Unable to read from file %s. (%s)" % (document, ex1.strerror))
        return []
    # Discard lines that do not contain at least one word.
    return [line for line in lines if _ONE_WORD.search(line) is not None]
|
def setDiscoveryTriples(win, table="discovery"):
    """Interactively record 'discovery' triples in the CFEPS database.

    For every pointing that has image triples, present the user the list of
    triples and store the one they pick in ``table`` (deleting any triples
    previously recorded there for that pointing first).

    :param win: UI object providing ``help`` (status text) and ``list``
        (selection dialog returning a list of chosen indices or None)
    :param table: database table in which to record the chosen triple
    """
    win.help("Getting a list of pointings with triples from the CFEPS db")
    pointings = getPointingsWithTriples()
    win.help("Select the " + table + " triple form the list...")
    import time  # NOTE(review): unused import, retained unchanged
    for pointing in pointings:
        header = "%10s %10s %8s %10s %8s" % (pointing[1], 'mjdate', 'Elongation', 'Filter', 'IQ')
        triples = getTriples(pointing=pointing[0])
        choices = []        # formatted display lines, parallel to triplist
        triplist = []       # triples shown to the user, parallel to choices
        no_type = 0         # count of triples not yet classified (D/C/R)
        previous_list = []  # triples already recorded in ``table``
        for triple in triples:
            # win.help(str(triple))
            tripinfo = getTripInfo(triple[0])
            if not tripinfo[table] == None:
                previous_list.append(triple[0])
            # if not abs(180 - tripinfo['elongation']) < 20:
            #     continue
            triplist.append(triple)
            if str(tripinfo['iq']) == 'None':
                # Sentinel so the %8.2f format below still works.
                tripinfo['iq'] = -1.0
            # One-letter marker for how the triple is already classified.
            obs_type = ' '
            if tripinfo['discovery']:
                obs_type = 'D'
            elif tripinfo['checkup']:
                obs_type = 'C'
            elif tripinfo['recovery']:
                obs_type = 'R'
            if obs_type == ' ':
                no_type += 1
            line = (obs_type, tripinfo['mjdate'], tripinfo['elongation'], tripinfo['filter'], tripinfo['iq'], tripinfo['block'])
            choices.append('%10s %10s %8.2f %10s %8.2f %8s' % line)
        # Nothing to show, or every triple is already classified: skip.
        if len(choices) == 0 or no_type == 0:
            continue
        # if len(previous_list) == 1:
        #     continue
        win.help("Choose a " + table + " triple (space) [no choice means skip] then press enter\n (q) to exit")
        choice = win.list(header, choices)
        if choice == None:
            # NOTE(review): this 'break' aborts ALL remaining pointings even
            # though the message says "Loading next triple" -- confirm intended.
            win.help("Loading next triple")
            break
        ### Record which triplet is a discovery triplet
        if len(choice) != 1:
            # Zero or multiple selections: skip this pointing.
            win.help("Loading next triple\n")
            continue
        discovery_triple = triplist[choice[0]]
        # Replace any previously recorded triples for this pointing.
        for triple in previous_list:
            sql = "DELETE FROM " + table + " WHERE triple=%s "
            cfeps.execute(sql, triple)
        sql = "INSERT INTO " + table + " ( triple ) VALUES ( %s ) "
        cfeps.execute(sql, discovery_triple)
|
def chunk_math(text):
    """Split a LaTeX math context into a list of single LaTeX entities.

    Parameters
    ----------
    text : string
        A mathematical context

    Returns
    -------
    list
        A list of single LaTeX entities, e.g. commands ('\\\\sum'),
        single symbols ('_', '^', '&', '{', '}'), single characters and
        single collapsed spaces.

    Examples
    --------
    >>> chunk_math('\\\\sum_i^n i^2')
    ['\\\\sum', '_', 'i', '^', 'n', ' ', 'i', '^', '2']

    Raises
    ------
    ValueError
        If '{' and '}' do not match (escaped braces are ignored).
    """
    # Fail when '{' and '}' don't match - be aware of escaped symbols!
    opened_braces = 0
    last_char = ''
    for char in text:
        if char == '{' and last_char != '\\':
            opened_braces += 1
        if char == '}' and last_char != '\\':
            opened_braces -= 1
        if opened_braces < 0:
            raise ValueError("Braces don't match: %s" % text)
        last_char = char
    if opened_braces != 0:
        raise ValueError("%i braces are still open" % opened_braces)
    # Parse
    single_symbol = ['_', '^', '&', '{', '}']
    breaking_chars = ['\\', ' '] + single_symbol
    chunks = []
    current_chunk = ''
    for char in text:
        if current_chunk == '':
            current_chunk = char
            continue
        if char == '\\':
            if current_chunk == '\\':
                # '\\' is a LaTeX linebreak - emit it as one chunk.
                current_chunk += char
                chunks.append(current_chunk)
                current_chunk = ''
            else:
                chunks.append(current_chunk)
                current_chunk = char
        elif current_chunk == '\\' and char in breaking_chars:
            # Escaped special character, e.g. '\{'.
            current_chunk += char
            chunks.append(current_chunk)
            current_chunk = ''
        elif char in breaking_chars:
            chunks.append(current_chunk)
            current_chunk = char
        elif char in string.ascii_letters + string.digits and current_chunk[0] == '\\':
            # Still inside a command name.  Bug fix: ``string.letters`` does
            # not exist on Python 3; ``string.ascii_letters`` is equivalent.
            current_chunk += char
        else:
            chunks.append(current_chunk)
            current_chunk = char
    # Add the last chunk
    if current_chunk != '':
        chunks.append(current_chunk)
    # Collapse runs of space chunks into a single space.
    filtered = []
    for chunk in chunks:
        if len(filtered) > 0 and filtered[-1] == ' ' and chunk == ' ':
            continue
        filtered.append(chunk)
    return filtered
|
def _process_with_multiprocessing(self, X: Union[pd.DataFrame, np.ndarray], n_refs: int, cluster_array: np.ndarray):
    """Yield ``(gap_value, k)`` pairs, evaluating each candidate cluster
    count in a separate worker process via ``self._calculate_gap``."""
    with ProcessPoolExecutor(max_workers=self.n_jobs) as pool:
        pending = [pool.submit(self._calculate_gap, X, n_refs, k)
                   for k in cluster_array]
        for done in as_completed(pending):
            yield done.result()
|
def open(dataset_dir, access_mode=READ_ONLY_ACCESS):
    """Open a tensor dataset rooted at ``dataset_dir``.

    :param dataset_dir: directory containing the dataset and its config
    :param access_mode: access mode; WRITE_ACCESS is rejected because an
        existing dataset cannot be opened write-only
    :returns: a TensorDataset
    :raises ValueError: if ``access_mode`` is WRITE_ACCESS
    """
    import io  # local import: this function shadows the builtin ``open``

    # check access mode
    if access_mode == WRITE_ACCESS:
        raise ValueError('Cannot open a dataset with write-only access')
    # Read config: prefer JSON, fall back to YAML.
    try:
        config_filename = os.path.join(dataset_dir, 'config.json')
        # Bug fix: the original called plain ``open`` here, which resolved
        # to this very function (it shadows the builtin) and recursed; it
        # also leaked the file handle and used a bare ``except``.
        with io.open(config_filename, 'r') as config_file:
            config = json.load(config_file)
    except (OSError, ValueError):
        config_filename = os.path.join(dataset_dir, 'config.yaml')
        config = YamlConfig(config_filename)
    # open dataset
    return TensorDataset(dataset_dir, config, access_mode=access_mode)
|
def encrypt_text(self, text, *args, **kwargs):
    """Encrypt a unicode string and return the token base64-encoded.

    input: unicode str, output: unicode str
    """
    raw = text.encode("utf-8")
    token = self.encrypt(raw, *args, **kwargs)
    encoded = base64.b64encode(token)
    return encoded.decode("utf-8")
|
def p_substr_assignment_no_let(p):
    """statement : ID LP expr RP EQ expr"""
    # NOTE: the docstring above is the PLY grammar production - do not edit.
    # This can be only a substr assignment like a$(i + 3) = ".", since arrays
    # have ARRAY_ID already.
    entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
    if entry is None:
        return
    if entry.class_ == CLASS.unknown:
        entry.class_ = CLASS.var
    if p[6].type_ != TYPE.string:
        # The right-hand side must be a string expression.
        api.errmsg.syntax_error_expected_string(p.lineno(5), p[6].type_)
    lineno = p.lineno(2)
    # OPTION BASE dependent origin; subtracted below to get a 0-based index.
    base = make_number(OPTIONS.string_base.value, lineno, _TYPE(gl.STR_INDEX_TYPE))
    substr = make_typecast(_TYPE(gl.STR_INDEX_TYPE), p[3], lineno)
    # Single-character substring assignment: start and end are both the
    # same (substr - base) expression.
    p[0] = make_sentence('LETSUBSTR', entry,
                         make_binary(lineno, 'MINUS', substr, base, func=lambda x, y: x - y),
                         make_binary(lineno, 'MINUS', substr, base, func=lambda x, y: x - y),
                         p[6])
|
def to_size(value, convert_to_human=True):
    '''Convert python int (bytes) to zfs size

    NOTE: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/pyzfs/common/util.py#114
    '''
    value = from_size(value)
    if value is None:
        value = 'none'
    if isinstance(value, Number) and value > 1024 and convert_to_human:
        exponent = int(math.floor(math.log(value, 1024)))
        scaled = float(value) / math.pow(1024, exponent)
        suffix = zfs_size[exponent - 1]
        if scaled == int(scaled):
            value = "{:.0f}{}".format(scaled, suffix)
        else:
            # NOTE: zfs is a bit odd on how it does the rounding, see the
            # libzfs implementation linked above: pick the most precise
            # rendering that still fits in 5 characters.
            for fmt in ("{:.2f}{}", "{:.1f}{}", "{:.0f}{}"):
                candidate = fmt.format(scaled, suffix)
                if len(candidate) <= 5:
                    value = candidate
                    break
    return value
|
def _create_minimum_needs_options_action(self):
    """Create the action that opens the global minimum needs dialog."""
    icon_path = resources_path('img', 'icons', 'show-global-minimum-needs.svg')
    action = QAction(QIcon(icon_path),
                     self.tr('Minimum Needs Configuration'),
                     self.iface.mainWindow())
    self.action_minimum_needs_config = action
    action.setStatusTip(self.tr('Open InaSAFE minimum needs configuration'))
    action.setWhatsThis(self.tr('Open InaSAFE minimum needs configuration'))
    action.triggered.connect(self.show_minimum_needs_configuration)
    self.add_action(action, add_to_toolbar=self.full_toolbar)
|
def tile_y_size(self, zoom):
    """Return the height of a tile in SRID units at the given zoom level.

    .. deprecated:: emits a DeprecationWarning on every call.

    - zoom: zoom level
    """
    warnings.warn(DeprecationWarning("tile_y_size is deprecated"))
    validate_zoom(zoom)
    tile_height = self.y_size / self.matrix_height(zoom)
    return round(tile_height, ROUND)
|
def convert_user_to_ldap(self, ID, DN):
    """Convert a normal user to a LDAP user.

    http://teampasswordmanager.com/docs/api-users/#convert_to_ldap

    :param ID: id of the user to convert
    :param DN: LDAP distinguished name to bind the user to
    """
    log.info('Convert User %s to LDAP DN %s' % (ID, DN))
    payload = {'login_dn': DN}
    self.put('users/%s/convert_to_ldap.json' % ID, payload)
|
def _add_generate_sub_commands ( self ) :
"""Sub commands for generating models for usage by clients .
Currently supports Google Closure ."""
|
gen_parser = self . _subparsers_handle . add_parser ( name = "gen" , help = "generate client side model stubs, filters" )
gen_parser . add_argument ( "-t" , "--template" , choices = [ 'closure.model' , 'closure.filter' ] , default = 'closure.model' , required = True , dest = "template" , help = "template to use for client side code generation" )
gen_parser . add_argument ( "-m" , "--model" , required = True , dest = "models_definition" , help = "path to models definition file or package" )
gen_parser . add_argument ( "-o" , "--output" , default = "." , dest = "output" , help = "output path for generated code" )
gen_parser . add_argument ( "-n" , "--namespace" , required = True , dest = "namespace" , help = "namespace to use with template e.g prestans.data.model" )
gen_parser . add_argument ( "-fn" , "--filter-namespace" , required = False , default = None , dest = "filter_namespace" , help = "filter namespace to use with template e.g prestans.data.filter" )
|
def _init_multicast_socket(self):
    """Init multicast socket.

    Creates a UDP socket configured for multicast on ``self._multicast_ip``
    / ``self._multicast_bind_port`` and appends it to ``self._listening``.

    :rtype: None
    """
    self.debug("()")
    # Create a UDP socket
    self._multicast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Allow reuse of addresses
    self._multicast_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Set multicast interface to local_ip
    self._multicast_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_IF,
                                      socket.inet_aton(self._multicast_ip))
    # Set multicast time-to-live
    # Should keep our multicast packets from escaping the local network
    self._multicast_socket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL,
                                      self._multicast_ttl)
    self._add_membership_multicast_socket()
    # Bind socket.  On Darwin the wildcard address is used instead
    # (presumably binding the multicast group address fails there --
    # TODO confirm).
    if platform.system().lower() == "darwin":
        self._multicast_socket.bind(("0.0.0.0", self._multicast_bind_port))
    else:
        self._multicast_socket.bind((self._multicast_ip, self._multicast_bind_port))
    self._listening.append(self._multicast_socket)
|
def _clean_dead_sessions ( self ) :
"""Traverses sessions to determine if any sockets
were removed ( indicates a stopped session ) .
In these cases , remove the session ."""
|
for sck in list ( self . sessions . keys ( ) ) :
session = self . sessions [ sck ]
if session . socket is None :
del self . sessions [ sck ]
|
def download(self, name: str, force: bool = False) -> bool:
    """Attempt to pull a given Docker image from DockerHub.

    Parameters:
        name: the name of the Docker image.
        force: accepted for interface compatibility.
            NOTE(review): this flag is not consulted anywhere in the body
            -- confirm whether it was ever meant to do anything.

    Returns:
        `True` if successfully downloaded, otherwise `False`.
    """
    try:
        self.__docker.images.pull(name)
    except docker.errors.NotFound:
        print("Failed to locate image on DockerHub: {}".format(name))
        return False
    return True
|
def parse(self, descriptor):
    """Create a text styling function from a descriptor.

    A descriptor is a dictionary containing any of the following keys:

    * fg: The foreground color (name or int), see `fgseq`
    * bg: The background color (name or int), see `bgseq`
    * fmt: The types of special text formatting (any combination of
      'b', 'u', 'i', and 'r'), see `typeseq`

    The resulting callable wraps a message in the style prefix and a reset
    sequence; it is stored on ``self.decorator``.
    """
    prefix = ""
    fg = descriptor.get('fg')
    if fg:
        prefix += fgseq(fg)
    bg = descriptor.get('bg')
    if bg:
        prefix += bgseq(bg)
    fmt = descriptor.get('fmt')
    if fmt:
        fmt_seq = typeseq(fmt)
        if fmt_seq:
            prefix += fmt_seq
    # wew, strings and bytes, what's a guy to do!
    suffix = resetseq()
    if not isinstance(suffix, six.text_type):
        suffix = suffix.decode('utf-8')

    def decorate(msg):
        if not isinstance(msg, six.text_type):
            msg = msg.decode('utf-8')
        return prefix + msg + suffix

    self.decorator = decorate
|
def add_task_db(self, task):
    '''Insert a new task record into the database.

    :param task: sequence of 14 values matching the ``tasks`` table columns
    '''
    sql = 'INSERT INTO tasks VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
    # The execute() return value was previously bound to an unused variable.
    self.cursor.execute(sql, task)
    self.check_commit()
|
def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):
    """Fetch an option variable from the highest-priority source available.

    Sources are consulted in order of preference: a local (element-level)
    options/attributes dict, a document-level metadata tag, then a default.
    A source whose value is None falls through to the next level, so global
    settings can be optionally overridden at a local level (e.g. applying
    different styles to docx text via a ``style-div`` metadata block that a
    Div's ``name`` attribute overrides).

    :type options: ``dict``
    :type local_tag: ``str``
    :type doc: :class:`Doc`
    :type doc_tag: ``str``
    :type default: ``any``
    :type error_on_none: ``bool``
    :raises ValueError: if every source yields None and ``error_on_none``
    """
    # element level
    if options is not None and local_tag is not None:
        local_value = options.get(local_tag)
        if local_value is not None:
            return local_value
    # document level
    if doc is not None and doc_tag is not None:
        doc_value = doc.get_metadata(doc_tag, None)
        if doc_value is not None:
            return doc_value
    # default level
    if default is None and error_on_none:
        raise ValueError("could not retrieve a value for tag; local={0}, doc={1}".format(local_tag, doc_tag))
    return default
|
def autoUseMyMetrics(ttGlyph, glyphName, hmtx):
    """Set the "USE_MY_METRICS" flag on the first component having the
    same advance width as the composite glyph, no transform and no
    horizontal shift (but allow it to shift vertically).

    This forces the composite glyph to use the possibly hinted horizontal
    metrics of the sub-glyph, instead of those from the "hmtx" table.
    """
    advance = hmtx[glyphName][0]
    for component in ttGlyph.components:
        try:
            baseName, transform = component.getComponentInfo()
        except AttributeError:
            # component uses '{first,second}Pt' instead of 'x' and 'y'
            continue
        try:
            baseMetrics = hmtx[baseName]
        except KeyError:
            # ignore missing components
            continue
        # transform[:-1] drops the y-shift, so a vertical offset is allowed.
        if baseMetrics[0] == advance and transform[:-1] == (1, 0, 0, 1, 0):
            component.flags |= USE_MY_METRICS
            break
|
def _get_url ( self , url ) :
"""Returns normalized url . If schema is not given , would fall
to filesystem
( ` ` file : / / / ` ` ) schema ."""
|
url = str ( url )
if url != 'default' and not '://' in url :
url = "file:" + urllib . pathname2url ( url )
return url
|
def retry_it(exceptions=(Exception,), tries=10, wait=0, handler=None, raised_exception=ReusablesError, raised_message=None):
    """Retry a function if an exception is raised, or if ``handler`` returns
    False for its output.

    Message format options: {func} {args} {kwargs}

    :param exceptions: tuple of exceptions to catch
    :param tries: number of tries to retry the function
    :param wait: time to wait between executions in seconds
    :param handler: function to check if output is valid, must return bool
    :param raised_exception: default is ReusablesError
    :param raised_message: message to pass to raised exception
    """
    def func_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            msg = (raised_message if raised_message
                   else "Max retries exceeded for function '{func}'")
            if not raised_message:
                msg = _add_args(msg, *args, **kwargs)

            def retry():
                # Bug fix: the original recursion dropped raised_exception
                # and raised_message, losing the custom error after the
                # first retry.
                if wait:
                    time.sleep(wait)
                return retry_it(exceptions=exceptions, tries=tries - 1,
                                wait=wait, handler=handler,
                                raised_exception=raised_exception,
                                raised_message=raised_message)(func)(*args, **kwargs)

            def exhausted():
                # Returns None when no exception class is configured, matching
                # the original fall-through behaviour.
                if raised_exception:
                    exc = raised_exception(msg.format(func=func.__name__,
                                                      args=args, kwargs=kwargs))
                    exc.__cause__ = None
                    raise exc

            try:
                result = func(*args, **kwargs)
            except exceptions:
                if tries > 0:
                    return retry()
                return exhausted()
            else:
                if handler and not handler(result):
                    # Bug fix: the original retried unconditionally here, so a
                    # permanently failing handler recursed forever (negative
                    # ``tries`` is still truthy).
                    if tries > 0:
                        return retry()
                    return exhausted()
                return result
        return wrapper
    return func_wrapper
|
def naive_request(self, url, method, **kwargs):
    """Make a request through the plain (non-OAuth) session.

    :param str url: url to send request to
    :param str method: type of request (get/put/post/patch/delete)
    :param kwargs: extra params to send to the request api
    :return: Response of the request
    :rtype: requests.Response
    """
    session = self.naive_session
    return self._internal_request(session, url, method, **kwargs)
|
def process(self, sched, coro):
    """Add the calling coro in a waiting-for-signal queue.

    If a signal for this name is already pending and the new waiter brings
    the waitlist up to the signal's recipient count, the pending signal is
    processed and removed.
    """
    super(WaitForSignal, self).process(sched, coro)
    waitlist = sched.sigwait[self.name]
    waitlist.append((self, coro))
    # A signal may already be queued for this name; deliver it once enough
    # waiters have accumulated.
    if self.name in sched.signals:
        sig = sched.signals[self.name]
        if sig.recipients <= len(waitlist):
            sig.process(sched, sig.coro)
            # The pending signal is consumed: drop its coro reference and
            # remove it from the pending table.
            del sig.coro
            del sched.signals[self.name]
|
def refresh_devices(self):
    '''Query the hub for the list of devices and create new device objects.'''
    try:
        response = self.api.get("/api/v2/devices", {'properties': 'all'})
        for device_data in response['DeviceList']:
            self.devices.append(Device(device_data, self))
    except APIError as e:
        print("API error: ")
        # Bug fix: the original iterated ``e.data.iteritems`` (the bound
        # method object itself, and Python-2-only anyway), which raised a
        # TypeError instead of printing the error details.
        for key, value in e.data.items():
            print(str(key) + ": " + str(value))
|
def set_sort_order(self, sort_order):
    """Sort the listings descending or ascending via a SortOrder object.

    :param sort_order: a SortOrder instance
    :raises DaftException: if ``sort_order`` is not a SortOrder
    """
    if isinstance(sort_order, SortOrder):
        self._sort_order = str(sort_order)
    else:
        raise DaftException("sort_order should be an instance of SortOrder.")
|
def _encode_image(self, np_image):
    """Return ``np_image`` encoded as jpeg or png (per the configured
    encoding format).

    :raises ValueError: if the image dtype is not uint8
    """
    if np_image.dtype != np.uint8:
        raise ValueError('Image should be uint8. Detected: %s.' % np_image.dtype)
    utils.assert_shape_match(np_image.shape, self._shape)
    encoder = ENCODE_FN[self._encoding_format]
    return self._runner.run(encoder, np_image)
|
def _merge_meta ( self , encoded_meta , meta ) :
"""Merge new meta dict into encoded meta . Returns new encoded meta ."""
|
new_meta = None
if meta :
_meta = self . _decode_meta ( encoded_meta )
for key , value in six . iteritems ( meta ) :
if value is None :
_meta . pop ( key , None )
else :
_meta [ key ] = value
new_meta = self . _encode_meta ( _meta )
return new_meta
|
def parse_host(parser, event, node):
    """Parse and return the host entity if that is the next entity.

    <!ELEMENT HOST (#PCDATA)>
    """
    # pylint: disable=unused-argument
    host_text = ''
    evt, nd = six.next(parser)
    if evt == pulldom.CHARACTERS:
        host_text = nd.nodeValue
        evt, nd = six.next(parser)
    if _is_end(evt, nd, 'HOST'):
        return host_text
    raise ParseError('Expecting end HOST')
|
def _request(self, url, api_call, request_args, method='GET'):
    """Perform an HTTP request and return the decoded JSON payload.

    Parameters:
        url (str): Base url call.
        api_call (str): API function to be called.
        request_args (dict): All requests parameters.
        method (str): (Default: GET) HTTP method 'GET' or 'POST'.

    Raises:
        PybooruHTTPError: on a non-2xx HTTP status.
        PybooruError: on request timeout or when the body is not valid JSON.
    """
    try:
        if method != 'GET':
            # Reset content-type for data encoded as a multipart form
            self.client.headers.update({'content-type': None})
        response = self.client.request(method, url, **request_args)
        self.last_call.update({
            'API': api_call,
            'url': response.url,
            'status_code': response.status_code,
            'status': self._get_status(response.status_code),
            'headers': response.headers,
        })
        if response.status_code in (200, 201, 202, 204):
            return response.json()
        raise PybooruHTTPError("In _request", response.status_code,
                               response.url)
    except requests.exceptions.Timeout:
        # BUG FIX: when the request itself times out, ``response`` was never
        # bound, so formatting ``response.url`` raised NameError and masked
        # the timeout.  Report the requested url instead.
        raise PybooruError("Timeout! url: {0}".format(url))
    except ValueError as e:
        # json.JSONDecodeError carries msg/lineno/colno, but a plain
        # ValueError does not -- fall back gracefully instead of raising
        # AttributeError while reporting the error.
        raise PybooruError("JSON Error: {0} in line {1} column {2}".format(
            getattr(e, 'msg', str(e)),
            getattr(e, 'lineno', '?'),
            getattr(e, 'colno', '?')))
|
def fixed_length_split(s, width):
    """Split string ``s`` into consecutive chunks of ``width`` characters.

    The last chunk may be shorter when ``len(s)`` is not a multiple of
    ``width``.

    :param s: the string to split
    :param width: length of each chunk
    :return: list of chunk substrings
    """
    # A regex alternative (re.findall(r'.{%s}' % width, s)) would drop a
    # short trailing chunk, so slice explicitly instead.
    take_chunk = lambda offset: s[offset:offset + width]
    return list(map(take_chunk, range(0, len(s), width)))
|
def sentryDSN(self, *args, **kwargs):
    """Get DSN for Sentry Project.

    Get temporary DSN (access credentials) for a sentry project.
    The credentials returned can be used with any Sentry client for up
    to 24 hours, after which they are automatically disabled.  If the
    project doesn't exist it will be created and assigned to the initial
    team configured for this component; contact a Sentry admin to have
    the project transferred to a team you have access to if needed.

    This method gives output: ``v1/sentry-dsn-response.json#``

    This method is ``stable``.
    """
    endpoint = self.funcinfo["sentryDSN"]
    return self._makeApiCall(endpoint, *args, **kwargs)
|
def records(credentials, url="https://freedns.afraid.org/api/"):
    """Yield the dynamic DNS records associated with this account.

    :param credentials: an AfraidCredentials instance
    :param url: the service URL
    """
    response = requests.get(
        url,
        params={"action": "getdyndns", "sha": credentials.sha},
        headers=constants.REQUEST_HEADERS_DEFAULT,
        timeout=60,
    )
    # Each non-empty line is a pipe-delimited record.
    for raw_line in response.text.splitlines():
        line = raw_line.strip()
        if line:
            yield AfraidDynDNSRecord(*line.split("|"))
|
def legacy_events_view(request):
    """Render a paginated table (100 per page) of legacy TeacherEvent rows."""
    all_events = TeacherEvent.objects.all()
    total = all_events.count()
    paginator = Paginator(all_events, 100)
    requested_page = request.GET.get('page')
    try:
        page_of_events = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing/garbage page parameter: show the first page.
        page_of_events = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: clamp to the last page.
        page_of_events = paginator.page(paginator.num_pages)
    context = {
        'page_name': "Legacy Events",
        'events': page_of_events,
        'event_count': total,
    }
    return render_to_response('teacher_events.html', context,
                              context_instance=RequestContext(request))
|
def from_structures(cls, structures, authors, projects=None, references='',
                    remarks=None, data=None, histories=None, created_at=None):
    """A convenience method for getting a list of StructureNL objects by
    specifying structures and metadata separately.  Some of the metadata
    is applied to all of the structures for ease of use.

    Args:
        structures: A list of Structure objects
        authors: *List* of {"name": '', "email": ''} dicts,
            *list* of Strings as 'John Doe <johndoe@gmail.com>',
            or a single String with commas separating authors
        projects: List of Strings ['Project A', 'Project B']. This
            applies to all structures.
        references: A String in BibTeX format. Again, this applies to
            all structures.
        remarks: List of Strings ['Remark A', 'Remark B']
        data: A list of free form dicts, namespaced at the root level
            with an underscore, e.g. {"_materialsproject": <custom data>}.
            The length of data should match the list of structures if
            not None.
        histories: List of list of dicts - [[{'name': '', 'url': '',
            'description': {}}], ...]. The length of histories should
            match the list of structures if not None.
        created_at: A datetime object
    """
    # BUG FIX: the previous defaults ``[{}] * len(structures)`` and
    # ``[[]] * len(structures)`` replicate a SINGLE shared dict/list, so
    # mutating the data/history of one SNL would mutate them all.  Build
    # a distinct object per structure instead.
    if data is None:
        data = [{} for _ in structures]
    if histories is None:
        histories = [[] for _ in structures]
    snl_list = []
    for i, struct in enumerate(structures):
        snl = StructureNL(struct, authors, projects=projects,
                          references=references, remarks=remarks,
                          data=data[i], history=histories[i],
                          created_at=created_at)
        snl_list.append(snl)
    return snl_list
|
def _mask_to_bytes(self, mask):
    """Convert the (type long) affinity ``mask`` to a packed cpu_set_t.

    The mask is split into 16 little-endian 64-bit words (1024 bits
    total) and joined into a single bytes value.
    """
    low64 = (1 << 64) - 1
    words = [(mask >> (64 * i)) & low64 for i in range(16)]
    packed = [struct.pack('<Q', word) for word in words]
    return mitogen.core.b('').join(packed)
|
def strip_ssh_from_git_uri(uri):
    # type: (S) -> S
    """Rewrite a ``git+ssh://host/seg/rest`` URI into ``git+git@host:seg/rest``
    (scp-like) form.

    Non-string inputs and URIs without a ``git+ssh://`` substring are
    returned unchanged.
    """
    # ``six.string_types`` is simply ``(str,)`` on Python 3; the builtin
    # check removes the six dependency with identical behavior.
    if isinstance(uri, str) and "git+ssh://" in uri:
        parsed = urlparse(uri)
        # Move the first path segment into the netloc with a ':' separator,
        # producing git@host:org/repo.git style addressing.
        head, _, tail = parsed.path.lstrip("/").partition("/")
        tail = "/{0}".format(tail)
        parsed = parsed._replace(
            netloc="{0}:{1}".format(parsed.netloc, head), path=tail)
        uri = urlunparse(parsed).replace("git+ssh://", "git+", 1)
    return uri
|
def copyto(self, src, where=None):
    """Emulates function ``copyto`` in NumPy.

    Parameters
    ----------
    src: (N,) ``ThetaParticles`` object
        source
    where: (N,) bool ndarray
        True if particle n in src must be copied.

    For each n such that where[n] is True, particle n of ``src`` is
    copied into self (at location n).
    """
    for name in self.containers:
        mine = self.__dict__[name]
        theirs = src.__dict__[name]
        if isinstance(mine, np.ndarray):
            np.copyto(mine, theirs, where=where)
        else:
            # Nested container: delegate to its own copyto.
            mine.copyto(theirs, where=where)
|
def from_python_src(cls, pySrc, lambdas_path, json_filename: str, stem: str,
                    save_file: bool = False):
    """Build a GrFN object from Python source code.

    :param pySrc: Python source text to parse
    :param lambdas_path: path for the generated lambda functions
    :param json_filename: output filename for the PGM JSON
    :param stem: module stem; ``{stem}_lambdas`` must be importable
    :param save_file: kept for interface compatibility (unused here)
    """
    asts = [ast.parse(pySrc)]
    pgm_dict = genPGM.create_pgm_dict(
        lambdas_path, asts, json_filename, {"FileName": f"{stem}.py"}  # HACK
    )
    # FIX: importlib.import_module is the documented public API;
    # ``importlib.__import__`` mirrors the builtin and only returns the
    # top-level package for dotted names.
    lambdas = importlib.import_module(stem + "_lambdas")
    return cls.from_dict(pgm_dict, lambdas)
|
def encrypt_file_inline(filename, passphrase):
    """Encrypt ``filename`` in place, with an optional passphrase.

    If the passphrase is None, a default is used.  This makes you
    vulnerable to confirmation attacks and learn-partial-information
    attacks.

    :param filename: The name of the file to encrypt.
    :type filename: str
    :param passphrase: The passphrase used to decrypt the file.
    :type passphrase: str or None
    :returns: The key required to decrypt the file.
    :rtype: str
    """
    derived_key = key_generators.key_from_file(filename, passphrase)
    inline_transform(filename, derived_key)
    return derived_key
|
def get_formatted_interval(results_range, default=_marker):
    """Return a string representation of the interval defined by the
    results range passed in.

    :param results_range: a dict or a ResultsRangeDict
    :param default: returned when the range is unsupported or undefined
        (when omitted, ``api.fail`` is invoked instead)
    """
    if not isinstance(results_range, Mapping):
        if default is _marker:
            api.fail("Type not supported")
        return default
    results_range = ResultsRangeDict(results_range)
    min_value = results_range.min if api.is_floatable(results_range.min) else None
    max_value = results_range.max if api.is_floatable(results_range.max) else None
    if min_value is None and max_value is None:
        if default is _marker:
            api.fail("Min and max values are not floatable or not defined")
        return default
    min_op = results_range.min_operator
    max_op = results_range.max_operator
    # One-sided range: render "<operator><value>"
    if max_value is None:
        return "{}{}".format(MIN_OPERATORS.getValue(min_op), min_value)
    if min_value is None:
        return "{}{}".format(MAX_OPERATORS.getValue(max_op), max_value)
    # Both ends set: render an interval, bracket style reflecting
    # inclusiveness ('[' / ']' for geq/leq, '(' / ')' otherwise).
    left = '[' if min_op == 'geq' else '('
    right = ']' if max_op == 'leq' else ')'
    return "{}{};{}{}".format(left, min_value, max_value, right)
|
def _isinstance(obj, cls, bound_Generic=None, bound_typevars=None,
                bound_typevars_readonly=False, follow_fwd_refs=True,
                _recursion_check=None):
    """Access this via ``pytypes.is_of_type``.

    Works like ``isinstance``, but supports PEP 484 style types from the
    ``typing`` module.

    obj : Any
        The object to check for being an instance of ``cls``.
    cls : type
        The type to check ``obj`` against.
    bound_Generic : Optional[Generic]
        A type object holding values for unbound typevars occurring in
        ``cls``.  In collision with ``bound_typevars``, the value from
        ``bound_Generic`` is preferred.  Default: None.
    bound_typevars : Optional[Dict[typing.TypeVar, type]]
        Values for unbound typevars occurring in ``cls``.  Depending on
        ``bound_typevars_readonly``, pytypes may also bind values to
        typevars by inserting mappings into this dictionary (useful to
        check a set of TypeVars consistently across multiple calls).
        Default: {}.
    bound_typevars_readonly : bool
        If True, pytypes cannot assign values to TypeVars, but only
        checks against values already present in ``bound_typevars`` or
        ``bound_Generic``.
    follow_fwd_refs : bool
        If False, encountering a ``ForwardRef`` aborts the check with a
        ForwardRefError.  Default: True.
    _recursion_check : Optional[Dict[type, Set[type]]]
        Internally used for recursion checks when Unions and
        ForwardRefs occur in the same type.  Default: None.
    """
    if bound_typevars is None:
        bound_typevars = {}
    # Trailing arguments shared by every delegated check below.
    common = (bound_Generic, bound_typevars, bound_typevars_readonly,
              follow_fwd_refs, _recursion_check)
    # Special treatment if cls is Iterable[...]: check obj's element type.
    if is_Generic(cls) and cls.__origin__ is typing.Iterable:
        if not is_iterable(obj):
            return False
        item_type = get_iterable_itemtype(obj)
        if item_type is None:
            return not pytypes.check_iterables
        return _issubclass(item_type, cls.__args__[0], *common)
    if is_Callable(cls):
        return _isinstance_Callable(obj, cls, *common)
    return _issubclass(deep_type(obj), cls, *common)
|
def recent_changes(request):
    """Display the recent changes, 20 revisions per page."""
    # BUG FIX: when the 'page' query parameter is absent (or not an int),
    # ``request.args.get("page", type=int)`` returns None and
    # ``max(1, None)`` raises TypeError on Python 3.  Default to page 1.
    page = max(1, request.args.get("page", type=int) or 1)
    query = RevisionedPage.query.order_by(RevisionedPage.revision_id.desc())
    pagination = Pagination(query, 20, page, "Special:Recent_Changes")
    return Response(generate_template("recent_changes.html",
                                      pagination=pagination))
|
def log_likelihood(z, x, P, H, R):
    """Return the log-likelihood of the measurement ``z`` given the
    Gaussian posterior ``(x, P)``, using measurement function ``H`` and
    measurement covariance error ``R``.
    """
    # Innovation covariance S = H (P H^T) + R; same association as the
    # original so floating-point results are bit-identical.
    innovation_cov = np.dot(H, np.dot(P, H.T)) + R
    predicted_measurement = np.dot(H, x)
    return logpdf(z, predicted_measurement, innovation_cov)
|
def contiguous_slice(in1):
    """This function unpads an array on the GPU in such a way as to make
    it contiguous.

    INPUTS:
    in1 (no default): Array containing data which has been padded.

    OUTPUTS:
    gpu_out1: Array containing unpadded, contiguous data (the central
    half-size region of ``in1``).
    """
    ker = SourceModule("""
__global__ void contiguous_slice_ker(float *in1, float *out1)
{
const int len = gridDim.x*blockDim.x;
const int col = (blockDim.x * blockIdx.x + threadIdx.x);
const int row = (blockDim.y * blockIdx.y + threadIdx.y);
const int tid2 = col + len*row;
const int first_idx = len/4;
const int last_idx = (3*len)/4;
const int out_idx = (col-first_idx)+(row-first_idx)*(len/2);
if (((col>=first_idx)&(row>=first_idx))&((col<last_idx)&(row<last_idx)))
{ out1[out_idx] = in1[tid2]; }
}
""", keep=True)
    # BUG FIX: use floor division so the output shape stays integral.
    # On Python 3, ``in1.shape[0] / 2`` is true division and hands
    # gpuarray.empty float dimensions.
    gpu_out1 = gpuarray.empty([in1.shape[0] // 2, in1.shape[1] // 2],
                              np.float32)
    contiguous_slice_ker = ker.get_function("contiguous_slice_ker")
    contiguous_slice_ker(in1, gpu_out1, block=(32, 32, 1),
                         grid=(int(in1.shape[1] // 32),
                               int(in1.shape[0] // 32)))
    return gpu_out1
|
def from_yaml(cls, yaml_path, **kwargs):
    """Create cluster with worker pod spec defined by a YAML file.

    We can start a cluster with pods defined in an accompanying YAML
    file like the following:

    .. code-block:: yaml

        kind: Pod
        metadata:
          labels:
            foo: bar
            baz: quux
        spec:
          containers:
          - image: daskdev/dask:latest
            name: dask-worker
            args: [dask-worker, $(DASK_SCHEDULER_ADDRESS), --nthreads, '2', --memory-limit, 8GB]
          restartPolicy: Never

    Examples
    --------
    >>> cluster = KubeCluster.from_yaml('pod.yaml', namespace='my-ns')  # doctest: +SKIP

    See Also
    --------
    KubeCluster.from_dict
    """
    # ``yaml`` is None when PyYAML could not be imported at module load.
    if not yaml:
        raise ImportError("PyYaml is required to use yaml functionality, please install it!")
    with open(yaml_path) as fh:
        spec = yaml.safe_load(fh)
    spec = dask.config.expand_environment_variables(spec)
    return cls.from_dict(spec, **kwargs)
|
def copy_doc(klass, fnname):
    """Copy the docstring of method ``fnname`` from ``klass``'s base
    class onto the overriding method of ``klass``."""
    _, base_func = __get_meth_func(klass.__base__, fnname)
    _, override_func = __get_meth_func(klass, fnname)
    override_func.__doc__ = base_func.__doc__
|
def runcmd(command, command_input=None, cwd=None):
    """Run ``command``, optionally feeding ``command_input`` to stdin,
    and capture stdout/stderr.

    Returns ``(returncode, stdout, stderr)`` with the streams as bytes.
    On a non-zero exit status an error report is written to stderr and
    the current process exits with that status.
    """
    proc = subprocess.Popen(command,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=cwd)
    stdout, stderr = proc.communicate(command_input)
    rc = proc.returncode
    if rc:
        sys.stderr.write('ABORTING: command "%s" failed w/ code %s:\n'
                         '%s\n%s' % (command, rc, stdout, stderr))
        sys.exit(rc)
    return rc, stdout, stderr
|
def read_line(csv_contents, options, prop_indices, mol, ensemble_list=None):
    """Read one parsed csv line into ``mol``.

    Returns ``mol`` on success, or ``1`` when the row lacks a valid
    active/decoy status label or a usable score value.
    """
    if not ensemble_list:
        # Does the status column carry a recognised active/decoy label?
        status_value = csv_contents[prop_indices[options.status_field]]
        active_match = re.compile(options.active_label).match(status_value)
        decoy_match = re.compile(options.decoy_label).match(status_value)
        if not (active_match or decoy_match):
            print("\n molecule lacks appropriate status label")
            return 1
    # Determine which columns hold scores.
    if ensemble_list:
        queryList = ensemble_list
    else:
        queryList = [key for key in prop_indices.keys()
                     if options.score_field in key]
    score_field_indices = [prop_indices[query] for query in queryList]
    # Every score column must carry a defined value.
    for value in (csv_contents[idx] for idx in score_field_indices):
        if value in ('', 'n/a', 'N/A', None):
            print("\n molecule lacks appropriate score field value")
            return 1
    # Copy each property onto the molecule, tagging score columns.
    for label, value_index in prop_indices.items():
        value = csv_contents[value_index]
        if label in queryList:
            mol.SetProp(label, value, 'score')
        else:
            mol.SetProp(label, value)
    return mol
|
def browse_morelikethis_preview(self, seed):
    """Fetch the More Like This preview result for a seed deviation.

    :param seed: The deviationid to fetch more like
    """
    response = self._req('/browse/morelikethis/preview', {"seed": seed})

    def _deserialize_deviations(raw_items):
        # Turn a list of raw dicts into Deviation objects.
        deviations = []
        for raw in raw_items:
            dev = Deviation()
            dev.from_dict(raw)
            deviations.append(dev)
        return deviations

    author = User()
    author.from_dict(response['author'])
    return {
        "seed": response['seed'],
        "author": author,
        "more_from_artist": _deserialize_deviations(response['more_from_artist']),
        "more_from_da": _deserialize_deviations(response['more_from_da']),
    }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.