signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def reciprocal_lattice_from_outcar(filename):
    # from https://github.com/MaterialsDiscovery/PyChemia
    """Find and return the reciprocal lattice vectors from a VASP OUTCAR file.

    If more than one set is present, only the last one is returned.

    Args:
        filename (str): The name of the OUTCAR file to be read.

    Returns:
        numpy.ndarray: A (3, 3) float array of reciprocal lattice vectors.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original left the handle open).
    with open(filename, "r") as handle:
        outcar = handle.read()
    # The OUTCAR may contain several lattice sections; keep only the last.
    rec_lat = re.findall(r"reciprocal\s*lattice\s*vectors\s*([-.\s\d]*)", outcar)[-1]
    rec_lat = np.array(rec_lat.split(), dtype=float)
    # Each of the 3 rows holds both the direct (cols 0-2) and reciprocal
    # (cols 3-5) lattice vectors; keep only the reciprocal part.
    rec_lat = rec_lat.reshape(3, 6)
    return rec_lat[:, 3:]
def default_resolve_fn(source, info, **args):
    # type: (Any, ResolveInfo, **Any) -> Optional[Any]
    """Default resolver used when a field has no explicit resolve function.

    Looks up the attribute (or dict key) on ``source`` named after the field.
    If the resolved value is callable, it is invoked and its result is
    returned; otherwise the value itself is returned.
    """
    name = info.field_name
    # Renamed local from `property` to avoid shadowing the builtin.
    if isinstance(source, dict):
        value = source.get(name)
    else:
        value = getattr(source, name, None)
    # A callable attribute acts as a zero-argument resolver.
    if callable(value):
        return value()
    return value
def _process_raw_report(self, raw_report):
    "Default raw input report data handler"
    # Ignore reports that arrive after the device has been closed.
    if not self.is_opened():
        return
    # Nothing to do if no event handlers or raw handler are registered.
    if not self.__evt_handlers and not self.__raw_handler:
        return
    if not raw_report[0] and (raw_report[0] not in self.__input_report_templates):
        # windows sends an empty array when disconnecting
        # but, this might have a collision with report_id = 0
        if not hid_device_path_exists(self.device_path):
            # windows XP sends empty report when disconnecting
            self.__reading_thread.abort()
            # device disconnected
            return
    if self.__raw_handler:
        # this might slow down data throughput, but at the expense of safety
        # Hand the raw bytes to the application wrapped read-only, then stop:
        # a raw handler bypasses the event-decision machinery entirely.
        self.__raw_handler(helpers.ReadOnlyList(raw_report))
        return
    # using pre-parsed report templates, by report id
    report_template = self.__input_report_templates[raw_report[0]]
    # old condition snapshot (taken BEFORE parsing, so we can diff below)
    old_values = report_template.get_usages()
    # parse incoming data
    report_template.set_raw_data(raw_report)
    # and compare it
    event_applies = self.evt_decision
    evt_handlers = self.__evt_handlers
    for key in report_template.keys():
        if key not in evt_handlers:
            continue
        # check if event handler exist!
        for event_kind, handlers in evt_handlers[key].items():
            # key = event_kind, values = handler set
            new_value = report_template[key].value
            # Only fire when the registered decision function for this event
            # kind says the old -> new transition qualifies.
            if not event_applies[event_kind](old_values[key], new_value):
                continue
            # decision applies, call handlers
            for function_handler in handlers:
                # check if the application wants some particular parameter
                if handlers[function_handler]:
                    function_handler(new_value, event_kind, handlers[function_handler])
                else:
                    function_handler(new_value, event_kind)
def stop(self):
    """Stop consuming the stream and shutdown the background thread."""
    with self._operational_lock:
        self._bidi_rpc.close()
        worker = self._thread
        if worker is not None:
            # Resume the thread to wake it up in case it is sleeping.
            self.resume()
            worker.join()
        self._thread = None
def abort(payment):
    """Abort a payment from its id.

    :param payment: The payment id or payment object
    :type payment: string | Payment
    :return: The payment resource
    :rtype: resources.Payment
    """
    # Accept either a Payment resource or a bare id string.
    payment_id = payment.id if isinstance(payment, resources.Payment) else payment
    response, __ = HttpClient().patch(
        routes.url(routes.PAYMENT_RESOURCE, resource_id=payment_id),
        {'abort': True},
    )
    return resources.Payment(**response)
def _split_coefficents ( self , w ) :
"""Split into intercept / bias and feature - specific coefficients""" | if self . _fit_intercept :
bias = w [ 0 ]
wf = w [ 1 : ]
else :
bias = 0.0
wf = w
return bias , wf |
def reset(self):
    """Instruct the target to forget any related exception."""
    chain = self.chain_id
    if not chain:
        return
    # Clear chain_id while asking the target to forget the saved chain,
    # then restore it so this chain remains usable afterwards.
    self.chain_id = None
    try:
        self.call_no_reply(mitogen.core.Dispatcher.forget_chain, chain)
    finally:
        self.chain_id = chain
def add_particles_ascii(self, s):
    """Adds particles from an ASCII string.

    Parameters
    ----------
    s : string
        One particle per line. Each line should include the particle's
        mass, radius, position (x, y, z) and velocity (vx, vy, vz).
        Blank lines are skipped.

    Raises
    ------
    AttributeError
        If a non-empty line does not contain exactly 8 parseable floats.
    """
    for line in s.split("\n"):
        fields = line.split()
        if not fields:
            continue
        # Narrow try: only the parsing can legitimately fail here. The
        # original bare `except:` also masked unrelated errors raised by
        # Particle()/self.add(); those now propagate unchanged.
        try:
            # Tuple unpacking also enforces that exactly 8 values are given
            # (wrong count raises ValueError, like a bad float does).
            m, r, x, y, z, vx, vy, vz = [float(v) for v in fields]
        except ValueError:
            raise AttributeError("Each line requires 8 floats corresponding to mass, radius, position (x,y,z) and velocity (x,y,z).")
        p = Particle(simulation=self, m=m, r=r, x=x, y=y, z=z, vx=vx, vy=vy, vz=vz)
        self.add(p)
def siblings(self, **kwargs):
    """Retrieve the other activities that also belong to the parent.

    It returns a combination of Tasks (a.o. UserTasks) and Subprocesses on the level of the current task,
    including itself. This also works if the activity is of type `ActivityType.PROCESS`.

    :param kwargs: Additional search arguments, check :func:`pykechain.Client.activities` for additional info
    :type kwargs: dict or None
    :return: list of :class:`Activity2`
    :raises NotFoundError: when it is a task in the top level of a project

    Example
    -------
    >>> task = project.activity('Some Task')
    >>> siblings = task.siblings()

    Example for siblings containing certain words in the task name

    >>> task = project.activity('Some Task')
    >>> siblings = task.siblings(name__contains='Another Task')
    """
    parent_id = self._json_data.get('parent_id')
    # Top-level tasks have no parent to list siblings under.
    if parent_id is not None:
        return self._client.activities(parent_id=parent_id, scope=self.scope_id, **kwargs)
    raise NotFoundError(
        "Cannot find subprocess for this task '{}', "
        "as this task exist on top level.".format(self.name)
    )
def send(self, request, **kwargs):
    # type: (ClientRequest, Any) -> ClientResponse
    """Send request object according to configuration.

    Allowed kwargs are:
    - session : will override the driver session and use yours. Should NOT be done unless really required.
    - anything else is sent straight to requests.

    :param ClientRequest request: The request object to be sent.
    """
    # It's not recommended to provide your own session; this exists mostly
    # to let some legacy code plug in correctly.
    session = kwargs.pop('session', self.session)
    try:
        response = session.request(request.method, request.url, **kwargs)
    except requests.RequestException as err:
        # Wrap transport-level failures while preserving the traceback.
        raise_with_traceback(ClientRequestError, "Error occurred in request.", err)
    return RequestsClientResponse(request, response)
def _extract_upnperror(self, err_xml):
    """Extract the error code and error description from an error returned by the device.

    Returns a ``(int error_code, str error_description)`` tuple, or raises
    ``SOAPProtocolError`` when the response lacks the expected tags.
    """
    # Bind the 's' prefix to the (single) namespace of the SOAP envelope.
    nsmap = {'s': list(err_xml.nsmap.values())[0]}
    fault_str = err_xml.findtext('s:Body/s:Fault/faultstring', namespaces=nsmap)
    try:
        # The fault detail is expected to hold one element whose tag name
        # matches the faultstring text.
        err = err_xml.xpath('s:Body/s:Fault/detail/*[name()="%s"]' % fault_str, namespaces=nsmap)[0]
    except IndexError:
        msg = 'Tag with name of %r was not found in the error response.' % fault_str
        # Log the full response body to aid protocol debugging.
        self._log.debug(msg + '\n' + etree.tostring(err_xml, pretty_print=True).decode('utf8'))
        raise SOAPProtocolError(msg)
    err_code = err.findtext('errorCode', namespaces=err.nsmap)
    err_desc = err.findtext('errorDescription', namespaces=err.nsmap)
    if err_code is None or err_desc is None:
        msg = 'Tags errorCode or errorDescription were not found in the error response.'
        self._log.debug(msg + '\n' + etree.tostring(err_xml, pretty_print=True).decode('utf8'))
        raise SOAPProtocolError(msg)
    return int(err_code), err_desc
def list_overlay_names(self):
    """Return list of overlay names (file names with extension stripped)."""
    if not os.path.isdir(self._overlays_abspath):
        # No overlay directory means no overlays.
        return []
    return [
        os.path.splitext(fname)[0]
        for fname in os.listdir(self._overlays_abspath)
    ]
def alignProcrustes(sources, rigid=False):
    """Return an ``Assembly`` of aligned source actors with
    the `Procrustes` algorithm. The output ``Assembly`` is normalized in size.

    `Procrustes` algorithm takes N set of points and aligns them in a least-squares sense
    to their mutual mean. The algorithm is iterated until convergence,
    as the mean must be recomputed after each alignment.

    :param bool rigid: if `True` scaling is disabled.

    .. hint:: |align3| |align3.py|_
    """
    # Collect every source's polydata into one multi-block dataset.
    group = vtk.vtkMultiBlockDataGroupFilter()
    for source in sources:
        # Procrustes alignment requires equal point counts across sources.
        if sources[0].N() != source.N():
            vc.printc("~times Procrustes error in align():", c=1)
            vc.printc(" sources have different nr of points", c=1)
            # NOTE(review): exits with status 0 on an error condition —
            # a raise (or nonzero exit) would be more conventional.
            exit(0)
        group.AddInputData(source.polydata())
    procrustes = vtk.vtkProcrustesAlignmentFilter()
    procrustes.StartFromCentroidOn()
    procrustes.SetInputConnection(group.GetOutputPort())
    if rigid:
        # Rigid-body mode: rotation + translation only, no scaling.
        procrustes.GetLandmarkTransform().SetModeToRigidBody()
    procrustes.Update()
    # Wrap each aligned block back into an Actor, preserving the original
    # visual properties of the corresponding source.
    acts = []
    for i, s in enumerate(sources):
        poly = procrustes.GetOutput().GetBlock(i)
        actor = Actor(poly)
        actor.SetProperty(s.GetProperty())
        acts.append(actor)
    assem = Assembly(acts)
    # Expose the computed landmark transform for callers that need it.
    assem.info["transform"] = procrustes.GetLandmarkTransform()
    return assem
def diy(expression_data, regressor_type, regressor_kwargs, gene_names=None, tf_names='all', client_or_address='local', early_stop_window_length=EARLY_STOP_WINDOW_LENGTH, limit=None, seed=None, verbose=False):
    """Infer a gene regulatory network from expression data.

    :param expression_data: one of:
           * a pandas DataFrame (rows=observations, columns=genes)
           * a dense 2D numpy.ndarray
           * a sparse scipy.sparse.csc_matrix
    :param regressor_type: string. One of: 'RF', 'GBM', 'ET'. Case insensitive.
    :param regressor_kwargs: a dictionary of key-value pairs that configures the regressor.
    :param gene_names: optional list of gene names (strings). Required when a (dense or sparse) matrix is passed as
                       'expression_data' instead of a DataFrame.
    :param tf_names: optional list of transcription factors. If None or 'all', the list of gene_names will be used.
    :param early_stop_window_length: early stopping window length.
    :param client_or_address: one of:
           * None or 'local': a new Client(LocalCluster()) will be used to perform the computation.
           * string address: a new Client(address) will be used to perform the computation.
           * a Client instance: the specified Client instance will be used to perform the computation.
    :param limit: optional number (int) of top regulatory links to return. Default None.
    :param seed: optional random seed for the regressors. Default 666. Use None for random seed.
    :param verbose: print info.
    :return: a pandas DataFrame['TF', 'target', 'importance'] representing the inferred gene regulatory links.
    """
    if verbose:
        print('preparing dask client')
    client, shutdown_callback = _prepare_client(client_or_address)
    try:
        if verbose:
            print('parsing input')
        # Normalize the three accepted input forms into a matrix + name lists.
        expression_matrix, gene_names, tf_names = _prepare_input(expression_data, gene_names, tf_names)
        if verbose:
            print('creating dask graph')
        # Build the lazy dask graph of per-target regression tasks.
        graph = create_graph(expression_matrix, gene_names, tf_names, client=client, regressor_type=regressor_type, regressor_kwargs=regressor_kwargs, early_stop_window_length=early_stop_window_length, limit=limit, seed=seed)
        if verbose:
            print('{} partitions'.format(graph.npartitions))
            print('computing dask graph')
        # Blocking compute; strongest regulatory links first.
        return client.compute(graph, sync=True).sort_values(by='importance', ascending=False)
    finally:
        # Always release the client/cluster, even when computation fails.
        shutdown_callback(verbose)
        if verbose:
            print('finished')
def impute(args):
    """%prog impute input.vcf hs37d5.fa 1

    Use IMPUTE2 to impute vcf on chromosome 1.
    """
    from pyfaidx import Fasta
    p = OptionParser(impute.__doc__)
    p.set_home("shapeit")
    p.set_home("impute")
    p.set_ref()
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 3:
        sys.exit(not p.print_help())
    vcffile, fastafile, chr = args
    mm = MakeManager()
    pf = vcffile.rsplit(".", 1)[0]
    hapsfile = pf + ".haps"
    # Reference panel directory (1000 Genomes Phase 3).
    kg = op.join(opts.ref, "1000GP_Phase3")
    # Phase the VCF with SHAPEIT first; IMPUTE2 consumes the phased haps.
    shapeit_phasing(mm, chr, vcffile, opts)
    fasta = Fasta(fastafile)
    size = len(fasta[chr])
    binsize = 5000000
    # NOTE(review): `size / binsize` and `xrange` below imply Python 2
    # (integer division); under Python 3 this would need `//` and `range` —
    # confirm the target runtime before porting.
    bins = size / binsize
    # 5Mb bins
    if size % binsize:
        bins += 1
    impute_cmd = op.join(opts.impute_home, "impute2")
    chunks = []
    # Run IMPUTE2 on each 5Mb chunk of the chromosome independently.
    for x in xrange(bins + 1):
        chunk_start = x * binsize + 1
        chunk_end = min(chunk_start + binsize - 1, size)
        outfile = pf + ".chunk{0:02d}.impute2".format(x)
        mapfile = "{0}/genetic_map_chr{1}_combined_b37.txt".format(kg, chr)
        rpf = "{0}/1000GP_Phase3_chr{1}".format(kg, chr)
        cmd = impute_cmd + " -m {0}".format(mapfile)
        cmd += " -known_haps_g {0}".format(hapsfile)
        cmd += " -h {0}.hap.gz -l {0}.legend.gz".format(rpf)
        cmd += " -Ne 20000 -int {0} {1}".format(chunk_start, chunk_end)
        cmd += " -o {0} -allow_large_regions -seed 367946".format(outfile)
        # Touch the output so make-style dependency tracking sees success.
        cmd += " && touch {0}".format(outfile)
        mm.add(hapsfile, outfile, cmd)
        chunks.append(outfile)
    # Combine all the files
    imputefile = pf + ".impute2"
    cmd = "cat {0} > {1}".format(" ".join(chunks), imputefile)
    mm.add(chunks, imputefile, cmd)
    # Convert to vcf
    vcffile = pf + ".impute2.vcf"
    cmd = "python -m jcvi.formats.vcf fromimpute2 {0} {1} {2} > {3}".format(imputefile, fastafile, chr, vcffile)
    mm.add(imputefile, vcffile, cmd)
    mm.write()
def unpin_chat_message(self, chat_id: Union[int, str]) -> bool:
    """Use this method to unpin a message in a group, channel or your own chat.
    You must be an administrator in the chat for this to work and must have the "can_pin_messages" admin
    right in the supergroup or "can_edit_messages" admin right in the channel.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.

    Returns:
        True on success.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Pinning message id 0 clears the currently pinned message.
    target_peer = self.resolve_peer(chat_id)
    self.send(functions.messages.UpdatePinnedMessage(peer=target_peer, id=0))
    return True
def get_entry_map(self, group=None):
    """Return the entry point map for `group`, or the full entry map."""
    # Lazily parse and cache the entry-point metadata on first access.
    if not hasattr(self, '_ep_map'):
        self._ep_map = EntryPoint.parse_map(self._get_metadata('entry_points.txt'), self)
    ep_map = self._ep_map
    if group is None:
        return ep_map
    return ep_map.get(group, {})
def _plot_weights_motif(self, index, plot_type="motif_raw", background_probs=DEFAULT_BASE_BACKGROUND, ncol=1, figsize=None):
    """Plot sequence-logo motif(s) for the layer's filter weights.

    Index can only be a single int (or None to plot all filters).
    Returns the matplotlib figure produced by ``seqlogo_fig``.
    """
    w_all = self.get_weights()
    if len(w_all) == 0:
        raise Exception("Layer needs to be initialized first")
    # First weight tensor holds the filters; filters are indexed on axis 2.
    W = w_all[0]
    if index is None:
        # Plot every filter when no index is given.
        index = np.arange(W.shape[2])
    if isinstance(index, int):
        index = [index]
    fig = plt.figure(figsize=figsize)
    if plot_type == "motif_pwm" and plot_type in self.AVAILABLE_PLOTS:
        # Convert PSSM weights to a PWM using the background probabilities.
        arr = pssm_array2pwm_array(W, background_probs)
    elif plot_type == "motif_raw" and plot_type in self.AVAILABLE_PLOTS:
        # Plot the raw weights without any transformation.
        arr = W
    elif plot_type == "motif_pwm_info" and plot_type in self.AVAILABLE_PLOTS:
        # Information-content representation of the PWM.
        quasi_pwm = pssm_array2pwm_array(W, background_probs)
        arr = _pwm2pwm_info(quasi_pwm)
    else:
        raise ValueError("plot_type needs to be from {0}".format(self.AVAILABLE_PLOTS))
    fig = seqlogo_fig(arr, vocab=self.VOCAB_name, figsize=figsize, ncol=ncol, plot_name="filter: ")
    # fig.show()
    return fig
def choices(self):
    """Available choices for characters to be generated."""
    if not self._choices:
        # Lazily scan the voice directory: every single-character
        # subdirectory name is a selectable character. The result is
        # cached in self._choices for subsequent calls.
        for entry in os.listdir(self._voicedir):
            if len(entry) == 1 and os.path.isdir(os.path.join(self._voicedir, entry)):
                self._choices.append(entry)
    return self._choices
def getContactTypes(self):
    """Return an iterator of L{IContactType} providers available to this
    organizer's store."""
    # Built-in contact types, yielded lazily in a fixed presentation order.
    yield VIPPersonContactType()
    yield EmailContactType(self.store)
    yield PostalContactType()
    yield PhoneNumberContactType()
    yield NotesContactType()
    # Contact types contributed by plugins; each is validated before use.
    for getContactTypes in self._gatherPluginMethods('getContactTypes'):
        for contactType in getContactTypes():
            self._checkContactType(contactType)
            yield contactType
def money_flow_index(close_data, high_data, low_data, volume, period):
    """Money Flow Index.

    Formula:
    MFI = 100 - (100 / (1 + PMF / NMF))

    Returns an array aligned with close_data; leading values that cannot be
    computed are filled by fill_for_noncomputable_vals.
    """
    catch_errors.check_for_input_len_diff(close_data, high_data, low_data, volume)
    catch_errors.check_for_period_error(close_data, period)
    mf = money_flow(close_data, high_data, low_data, volume)
    tp = typical_price(close_data, high_data, low_data)
    # flow[i] is True when the typical price rose from bar i to bar i+1.
    flow = [tp[idx] > tp[idx - 1] for idx in range(1, len(tp))]
    # Positive / negative money flow series. NOTE(review): mf[idx] is paired
    # with flow[idx], i.e. the money flow used is offset by one bar relative
    # to the typical-price comparison — confirm this is intended.
    pf = [mf[idx] if flow[idx] else 0 for idx in range(0, len(flow))]
    nf = [mf[idx] if not flow[idx] else 0 for idx in range(0, len(flow))]
    # Rolling `period`-length sums of positive and negative money flow.
    pmf = [sum(pf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(pf))]
    nmf = [sum(nf[idx + 1 - period:idx + 1]) for idx in range(period - 1, len(nf))]
    # Dividing by 0 is not an issue, it turns the value into NaN which we would
    # want in that case
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        money_ratio = np.array(pmf) / np.array(nmf)
    mfi = 100 - (100 / (1 + money_ratio))
    # Pad the front so the result aligns with close_data's length.
    mfi = fill_for_noncomputable_vals(close_data, mfi)
    return mfi
def extracted ( name , source , source_hash = None , source_hash_name = None , source_hash_update = False , skip_verify = False , password = None , options = None , list_options = None , force = False , overwrite = False , clean = False , user = None , group = None , if_missing = None , trim_output = False , use_cmd_unzip = None , extract_perms = True , enforce_toplevel = True , enforce_ownership_on = None , archive_format = None , ** kwargs ) :
'''. . versionadded : : 2014.1.0
. . versionchanged : : 2016.11.0
This state has been rewritten . Some arguments are new to this release
and will not be available in the 2016.3 release cycle ( and earlier ) .
Additionally , the * * ZIP Archive Handling * * section below applies
specifically to the 2016.11.0 release ( and newer ) .
Ensure that an archive is extracted to a specific directory .
. . important : :
* * Changes for 2016.11.0 * *
In earlier releases , this state would rely on the ` ` if _ missing ` `
argument to determine whether or not the archive needed to be
extracted . When this argument was not passed , then the state would just
assume ` ` if _ missing ` ` is the same as the ` ` name ` ` argument ( i . e . the
parent directory into which the archive would be extracted ) .
This caused a number of annoyances . One such annoyance was the need to
know beforehand a path that would result from the extraction of the
archive , and setting ` ` if _ missing ` ` to that directory , like so :
. . code - block : : yaml
extract _ myapp :
archive . extracted :
- name : / var / www
- source : salt : / / apps / src / myapp - 16.2.4 . tar . gz
- user : www
- group : www
- if _ missing : / var / www / myapp - 16.2.4
If ` ` / var / www ` ` already existed , this would effectively make
` ` if _ missing ` ` a required argument , just to get Salt to extract the
archive .
Some users worked around this by adding the top - level directory of the
archive to the end of the ` ` name ` ` argument , and then used ` ` - - strip ` `
or ` ` - - strip - components ` ` to remove that top - level dir when extracting :
. . code - block : : yaml
extract _ myapp :
archive . extracted :
- name : / var / www / myapp - 16.2.4
- source : salt : / / apps / src / myapp - 16.2.4 . tar . gz
- user : www
- group : www
With the rewrite for 2016.11.0 , these workarounds are no longer
necessary . ` ` if _ missing ` ` is still a supported argument , but it is no
longer required . The equivalent SLS in 2016.11.0 would be :
. . code - block : : yaml
extract _ myapp :
archive . extracted :
- name : / var / www
- source : salt : / / apps / src / myapp - 16.2.4 . tar . gz
- user : www
- group : www
Salt now uses a function called : py : func : ` archive . list
< salt . modules . archive . list > ` to get a list of files / directories in the
archive . Using this information , the state can now check the minion to
see if any paths are missing , and know whether or not the archive needs
to be extracted . This makes the ` ` if _ missing ` ` argument unnecessary in
most use cases .
. . important : :
* * ZIP Archive Handling * *
* Note : this information applies to 2016.11.0 and later . *
Salt has two different functions for extracting ZIP archives :
1 . : py : func : ` archive . unzip < salt . modules . archive . unzip > ` , which uses
Python ' s zipfile _ module to extract ZIP files .
2 . : py : func : ` archive . cmd _ unzip < salt . modules . archive . cmd _ unzip > ` , which
uses the ` ` unzip ` ` CLI command to extract ZIP files .
Salt will prefer the use of : py : func : ` archive . cmd _ unzip
< salt . modules . archive . cmd _ unzip > ` when CLI options are specified ( via
the ` ` options ` ` argument ) , and will otherwise prefer the
: py : func : ` archive . unzip < salt . modules . archive . unzip > ` function . Use
of : py : func : ` archive . cmd _ unzip < salt . modules . archive . cmd _ unzip > ` can be
forced however by setting the ` ` use _ cmd _ unzip ` ` argument to ` ` True ` ` .
By contrast , setting this argument to ` ` False ` ` will force usage of
: py : func : ` archive . unzip < salt . modules . archive . unzip > ` . For example :
. . code - block : : yaml
/ var / www :
archive . extracted :
- source : salt : / / foo / bar / myapp . zip
- use _ cmd _ unzip : True
When ` ` use _ cmd _ unzip ` ` is omitted , Salt will choose which extraction
function to use based on the source archive and the arguments passed to
the state . When in doubt , simply do not set this argument ; it is
provided as a means of overriding the logic Salt uses to decide which
function to use .
There are differences in the features available in both extraction
functions . These are detailed below .
- * Command - line options * ( only supported by : py : func : ` archive . cmd _ unzip
< salt . modules . archive . cmd _ unzip > ` ) - When the ` ` options ` ` argument is
used , : py : func : ` archive . cmd _ unzip < salt . modules . archive . cmd _ unzip > `
is the only function that can be used to extract the archive .
Therefore , if ` ` use _ cmd _ unzip ` ` is specified and set to ` ` False ` ` ,
and ` ` options ` ` is also set , the state will not proceed .
- * Permissions * - Due to an ` upstream bug in Python ` _ , permissions are
not preserved when the zipfile _ module is used to extract an archive .
As of the 2016.11.0 release , : py : func : ` archive . unzip
< salt . modules . archive . unzip > ` ( as well as this state ) has an
` ` extract _ perms ` ` argument which , when set to ` ` True ` ` ( the default ) ,
will attempt to match the permissions of the extracted
files / directories to those defined within the archive . To disable
this functionality and have the state not attempt to preserve the
permissions from the ZIP archive , set ` ` extract _ perms ` ` to ` ` False ` ` :
. . code - block : : yaml
/ var / www :
archive . extracted :
- source : salt : / / foo / bar / myapp . zip
- extract _ perms : False
. . _ ` upstream bug in Python ` : https : / / bugs . python . org / issue15795
name
Directory into which the archive should be extracted
source
Archive to be extracted
. . note : :
This argument uses the same syntax as its counterpart in the
: py : func : ` file . managed < salt . states . file . managed > ` state .
source _ hash
Hash of source file , or file with list of hash - to - file mappings
. . note : :
This argument uses the same syntax as its counterpart in the
: py : func : ` file . managed < salt . states . file . managed > ` state .
. . versionchanged : : 2016.11.0
If this argument specifies the hash itself , instead of a URI to a
file containing hashes , the hash type can now be omitted and Salt
will determine the hash type based on the length of the hash . For
example , both of the below states are now valid , while before only
the second one would be :
. . code - block : : yaml
foo _ app :
archive . extracted :
- name : / var / www
- source : https : / / mydomain . tld / foo . tar . gz
- source _ hash : 3360db35e682f1c5f9c58aa307de16d41361618c
bar _ app :
archive . extracted :
- name : / var / www
- source : https : / / mydomain . tld / bar . tar . gz
- source _ hash : sha1 = 5edb7d584b82ddcbf76e311601f5d4442974aaa5
source _ hash _ name
When ` ` source _ hash ` ` refers to a hash file , Salt will try to find the
correct hash by matching the filename part of the ` ` source ` ` URI . When
managing a file with a ` ` source ` ` of ` ` salt : / / files / foo . tar . gz ` ` , then
the following line in a hash file would match :
. . code - block : : text
acbd18db4cc2f85cedef654fccc4a4d8 foo . tar . gz
This line would also match :
. . code - block : : text
acbd18db4cc2f85cedef654fccc4a4d8 . / dir1 / foo . tar . gz
However , sometimes a hash file will include multiple similar paths :
. . code - block : : text
37b51d194a7513e45b56f6524f2d51f2 . / dir1 / foo . txt
acbd18db4cc2f85cedef654fccc4a4d8 . / dir2 / foo . txt
73feffa4b7f6bb68e44cf984c85f6e88 . / dir3 / foo . txt
In cases like this , Salt may match the incorrect hash . This argument
can be used to tell Salt which filename to match , to ensure that the
correct hash is identified . For example :
. . code - block : : yaml
/ var / www :
archive . extracted :
- source : https : / / mydomain . tld / dir2 / foo . tar . gz
- source _ hash : https : / / mydomain . tld / hashes
- source _ hash _ name : . / dir2 / foo . tar . gz
. . note : :
This argument must contain the full filename entry from the
checksum file , as this argument is meant to disambiguate matches
for multiple files that have the same basename . So , in the
example above , simply using ` ` foo . txt ` ` would not match .
. . versionadded : : 2016.11.0
source _ hash _ update : False
Set this to ` ` True ` ` if archive should be extracted if source _ hash has
changed . This would extract regardless of the ` ` if _ missing ` ` parameter .
Note that this is only checked if the ` ` source ` ` value has not changed .
If it has ( e . g . to increment a version number in the path ) then the
archive will not be extracted even if the hash has changed .
. . versionadded : : 2016.3.0
skip _ verify : False
If ` ` True ` ` , hash verification of remote file sources ( ` ` http : / / ` ` ,
` ` https : / / ` ` , ` ` ftp : / / ` ` ) will be skipped , and the ` ` source _ hash ` `
argument will be ignored .
. . versionadded : : 2016.3.4
keep _ source : True
For ` ` source ` ` archives not local to the minion ( i . e . from the Salt
fileserver or a remote source such as ` ` http ( s ) ` ` or ` ` ftp ` ` ) , Salt
will need to download the archive to the minion cache before they can
be extracted . To remove the downloaded archive after extraction , set
this argument to ` ` False ` ` .
. . versionadded : : 2017.7.3
keep : True
Same as ` ` keep _ source ` ` , kept for backward - compatibility .
. . note : :
If both ` ` keep _ source ` ` and ` ` keep ` ` are used , ` ` keep ` ` will be
ignored .
password
* * For ZIP archives only . * * Password used for extraction .
. . versionadded : : 2016.3.0
. . versionchanged : : 2016.11.0
The newly - added : py : func : ` archive . is _ encrypted
< salt . modules . archive . is _ encrypted > ` function will be used to
determine if the archive is password - protected . If it is , then the
` ` password ` ` argument will be required for the state to proceed .
options
* * For tar and zip archives only . * * This option can be used to specify
a string of additional arguments to pass to the tar / zip command .
If this argument is not used , then the minion will attempt to use
Python ' s native tarfile _ / zipfile _ support to extract it . For zip
archives , this argument is mostly used to overwrite existing files with
Using this argument means that the ` ` tar ` ` or ` ` unzip ` ` command will be
used , which is less platform - independent , so keep this in mind when
using this option ; the CLI options must be valid options for the
` ` tar ` ` / ` ` unzip ` ` implementation on the minion ' s OS .
. . versionadded : : 2016.11.0
. . versionchanged : : 2015.8.11,2016.3.2
XZ - compressed tar archives no longer require ` ` J ` ` to manually be
set in the ` ` options ` ` , they are now detected automatically and
decompressed using the xz _ CLI command and extracted using ` ` tar
xvf ` ` . This is a more platform - independent solution , as not all tar
implementations support the ` ` J ` ` argument for extracting archives .
. . note : :
For tar archives , main operators like ` ` - x ` ` , ` ` - - extract ` ` ,
` ` - - get ` ` , ` ` - c ` ` and ` ` - f ` ` / ` ` - - file ` ` should * not * be used here .
list _ options
* * For tar archives only . * * This state uses : py : func : ` archive . list
< salt . modules . archive . list _ > ` to discover the contents of the source
archive so that it knows which file paths should exist on the minion if
the archive has already been extracted . For the vast majority of tar
archives , : py : func : ` archive . list < salt . modules . archive . list _ > ` " just
works " . Archives compressed using gzip , bzip2 , and xz / lzma ( with the
help of the xz _ CLI command ) are supported automatically . However , for
archives compressed using other compression types , CLI options must be
passed to : py : func : ` archive . list < salt . modules . archive . list _ > ` .
This argument will be passed through to : py : func : ` archive . list
< salt . modules . archive . list _ > ` as its ` ` options ` ` argument , to allow it
to successfully list the archive ' s contents . For the vast majority of
archives , this argument should not need to be used , it should only be
needed in cases where the state fails with an error stating that the
archive ' s contents could not be listed .
. . versionadded : : 2016.11.0
force : False
If a path that should be occupied by a file in the extracted result is
instead a directory ( or vice - versa ) , the state will fail . Set this
argument to ` ` True ` ` to force these paths to be removed in order to
allow the archive to be extracted .
. . warning : :
Use this option * very * carefully .
. . versionadded : : 2016.11.0
overwrite : False
Set this to ` ` True ` ` to force the archive to be extracted . This is
useful for cases where the filenames / directories have not changed , but
the content of the files have .
. . versionadded : : 2016.11.1
clean : False
Set this to ` ` True ` ` to remove any top - level files and recursively
remove any top - level directory paths before extracting .
. . note : :
Files will only be cleaned first if extracting the archive is
deemed necessary , either by paths missing on the minion , or if
` ` overwrite ` ` is set to ` ` True ` ` .
. . versionadded : : 2016.11.1
user
The user to own each extracted file . Not available on Windows .
. . versionadded : : 2015.8.0
. . versionchanged : : 2016.3.0
When used in combination with ` ` if _ missing ` ` , ownership will only
be enforced if ` ` if _ missing ` ` is a directory .
. . versionchanged : : 2016.11.0
Ownership will be enforced only on the file / directory paths found
by running : py : func : ` archive . list < salt . modules . archive . list _ > ` on
the source archive . An alternative root directory on which to
enforce ownership can be specified using the
` ` enforce _ ownership _ on ` ` argument .
group
The group to own each extracted file . Not available on Windows .
. . versionadded : : 2015.8.0
. . versionchanged : : 2016.3.0
When used in combination with ` ` if _ missing ` ` , ownership will only
be enforced if ` ` if _ missing ` ` is a directory .
. . versionchanged : : 2016.11.0
Ownership will be enforced only on the file / directory paths found
by running : py : func : ` archive . list < salt . modules . archive . list _ > ` on
the source archive . An alternative root directory on which to
enforce ownership can be specified using the
` ` enforce _ ownership _ on ` ` argument .
if _ missing
If specified , this path will be checked , and if it exists then the
archive will not be extracted . This path can be either a directory or a
file , so this option can also be used to check for a semaphore file and
conditionally skip extraction .
. . versionchanged : : 2016.3.0
When used in combination with either ` ` user ` ` or ` ` group ` ` ,
ownership will only be enforced when ` ` if _ missing ` ` is a directory .
. . versionchanged : : 2016.11.0
Ownership enforcement is no longer tied to this argument , it is
simply checked for existence and extraction will be skipped if
if is present .
trim _ output : False
Useful for archives with many files in them . This can either be set to
` ` True ` ` ( in which case only the first 100 files extracted will be
in the state results ) , or it can be set to an integer for more exact
control over the max number of files to include in the state results .
. . versionadded : : 2016.3.0
use _ cmd _ unzip : False
Set to ` ` True ` ` for zip files to force usage of the
: py : func : ` archive . cmd _ unzip < salt . modules . archive . cmd _ unzip > ` function
to extract .
. . versionadded : : 2016.11.0
extract _ perms : True
* * For ZIP archives only . * * When using : py : func : ` archive . unzip
< salt . modules . archive . unzip > ` to extract ZIP archives , Salt works
around an ` upstream bug in Python ` _ to set the permissions on extracted
files / directories to match those encoded into the ZIP archive . Set this
argument to ` ` False ` ` to skip this workaround .
. . versionadded : : 2016.11.0
enforce _ toplevel : True
This option will enforce a single directory at the top level of the
source archive , to prevent extracting a ' tar - bomb ' . Set this argument
to ` ` False ` ` to allow archives with files ( or multiple directories ) at
the top level to be extracted .
. . versionadded : : 2016.11.0
enforce _ ownership _ on
When ` ` user ` ` or ` ` group ` ` is specified , Salt will default to enforcing
permissions on the file / directory paths detected by running
: py : func : ` archive . list < salt . modules . archive . list _ > ` on the source
archive . Use this argument to specify an alternate directory on which
ownership should be enforced .
. . note : :
This path must be within the path specified by the ` ` name ` `
argument .
. . versionadded : : 2016.11.0
archive _ format
One of ` ` tar ` ` , ` ` zip ` ` , or ` ` rar ` ` .
. . versionchanged : : 2016.11.0
If omitted , the archive format will be guessed based on the value
of the ` ` source ` ` argument . If the minion is running a release
older than 2016.11.0 , this option is required .
. . _ tarfile : https : / / docs . python . org / 2 / library / tarfile . html
. . _ zipfile : https : / / docs . python . org / 2 / library / zipfile . html
. . _ xz : http : / / tukaani . org / xz /
* * Examples * *
1 . tar with lmza ( i . e . xz ) compression :
. . code - block : : yaml
graylog2 - server :
archive . extracted :
- name : / opt /
- source : https : / / github . com / downloads / Graylog2 / graylog2 - server / graylog2 - server - 0.9.6p1 . tar . lzma
- source _ hash : md5 = 499ae16dcae71eeb7c3a30c75ea7a1a6
2 . tar archive with flag for verbose output , and enforcement of user / group
ownership :
. . code - block : : yaml
graylog2 - server :
archive . extracted :
- name : / opt /
- source : https : / / github . com / downloads / Graylog2 / graylog2 - server / graylog2 - server - 0.9.6p1 . tar . gz
- source _ hash : md5 = 499ae16dcae71eeb7c3a30c75ea7a1a6
- options : v
- user : foo
- group : foo
3 . tar archive , with ` ` source _ hash _ update ` ` set to ` ` True ` ` to prevent
state from attempting extraction unless the ` ` source _ hash ` ` differs
from the previous time the archive was extracted :
. . code - block : : yaml
graylog2 - server :
archive . extracted :
- name : / opt /
- source : https : / / github . com / downloads / Graylog2 / graylog2 - server / graylog2 - server - 0.9.6p1 . tar . lzma
- source _ hash : md5 = 499ae16dcae71eeb7c3a30c75ea7a1a6
- source _ hash _ update : True''' | ret = { 'name' : name , 'result' : False , 'changes' : { } , 'comment' : '' }
# Remove pub kwargs as they ' re irrelevant here .
kwargs = salt . utils . args . clean_kwargs ( ** kwargs )
if 'keep_source' in kwargs and 'keep' in kwargs :
ret . setdefault ( 'warnings' , [ ] ) . append ( 'Both \'keep_source\' and \'keep\' were used. Since these both ' 'do the same thing, \'keep\' was ignored.' )
keep_source = bool ( kwargs . pop ( 'keep_source' ) )
kwargs . pop ( 'keep' )
elif 'keep_source' in kwargs :
keep_source = bool ( kwargs . pop ( 'keep_source' ) )
elif 'keep' in kwargs :
keep_source = bool ( kwargs . pop ( 'keep' ) )
else : # Neither was passed , default is True
keep_source = True
if not _path_is_abs ( name ) :
ret [ 'comment' ] = '{0} is not an absolute path' . format ( name )
return ret
else :
if not name : # Empty name , like None , ' ' etc .
ret [ 'comment' ] = 'Name of the directory path needs to be specified'
return ret
# os . path . isfile ( ) returns False when there is a trailing slash , hence
# our need for first stripping the slash and then adding it back later .
# Otherwise , we can ' t properly check if the extraction location both a )
# exists and b ) is a file .
# > > > os . path . isfile ( ' / tmp / foo . txt ' )
# True
# > > > os . path . isfile ( ' / tmp / foo . txt / ' )
# False
name = name . rstrip ( os . sep )
if os . path . isfile ( name ) :
ret [ 'comment' ] = '{0} exists and is not a directory' . format ( name )
return ret
# Add back the slash so that file . makedirs properly creates the
# destdir if it needs to be created . file . makedirs expects a trailing
# slash in the directory path .
name += os . sep
if not _path_is_abs ( if_missing ) :
ret [ 'comment' ] = 'Value for \'if_missing\' is not an absolute path'
return ret
if not _path_is_abs ( enforce_ownership_on ) :
ret [ 'comment' ] = ( 'Value for \'enforce_ownership_on\' is not an ' 'absolute path' )
return ret
else :
if enforce_ownership_on is not None :
try :
not_rel = os . path . relpath ( enforce_ownership_on , name ) . startswith ( '..' + os . sep )
except Exception : # A ValueError is raised on Windows when the paths passed to
# os . path . relpath are not on the same drive letter . Using a
# generic Exception here to keep other possible exception types
# from making this state blow up with a traceback .
not_rel = True
if not_rel :
ret [ 'comment' ] = ( 'Value for \'enforce_ownership_on\' must be within {0}' . format ( name ) )
return ret
if if_missing is not None and os . path . exists ( if_missing ) :
ret [ 'result' ] = True
ret [ 'comment' ] = 'Path {0} exists' . format ( if_missing )
return ret
if user or group :
if salt . utils . platform . is_windows ( ) :
ret [ 'comment' ] = 'User/group ownership cannot be enforced on Windows minions'
return ret
if user :
uid = __salt__ [ 'file.user_to_uid' ] ( user )
if uid == '' :
ret [ 'comment' ] = 'User {0} does not exist' . format ( user )
return ret
else :
uid = - 1
if group :
gid = __salt__ [ 'file.group_to_gid' ] ( group )
if gid == '' :
ret [ 'comment' ] = 'Group {0} does not exist' . format ( group )
return ret
else :
gid = - 1
else : # We should never hit the ownership enforcement code unless user or
# group was specified , but just in case , set uid / gid to - 1 to make the
# os . chown ( ) a no - op and avoid a NameError .
uid = gid = - 1
if source_hash_update and not source_hash :
ret . setdefault ( 'warnings' , [ ] ) . append ( 'The \'source_hash_update\' argument is ignored when ' '\'source_hash\' is not also specified.' )
try :
source_match = __salt__ [ 'file.source_list' ] ( source , source_hash , __env__ ) [ 0 ]
except CommandExecutionError as exc :
ret [ 'result' ] = False
ret [ 'comment' ] = exc . strerror
return ret
urlparsed_source = _urlparse ( source_match )
urlparsed_scheme = urlparsed_source . scheme
urlparsed_path = os . path . join ( urlparsed_source . netloc , urlparsed_source . path ) . rstrip ( os . sep )
# urlparsed _ scheme will be the drive letter if this is a Windows file path
# This checks for a drive letter as the scheme and changes it to file
if urlparsed_scheme and urlparsed_scheme . lower ( ) in string . ascii_lowercase :
urlparsed_path = ':' . join ( [ urlparsed_scheme , urlparsed_path ] )
urlparsed_scheme = 'file'
source_hash_basename = urlparsed_path or urlparsed_source . netloc
source_is_local = urlparsed_scheme in salt . utils . files . LOCAL_PROTOS
if source_is_local : # Get rid of " file : / / " from start of source _ match
source_match = os . path . realpath ( os . path . expanduser ( urlparsed_path ) )
if not os . path . isfile ( source_match ) :
ret [ 'comment' ] = 'Source file \'{0}\' does not exist' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) )
return ret
valid_archive_formats = ( 'tar' , 'rar' , 'zip' )
if not archive_format :
archive_format = salt . utils . files . guess_archive_type ( source_hash_basename )
if archive_format is None :
ret [ 'comment' ] = ( 'Could not guess archive_format from the value of the ' '\'source\' argument. Please set this archive_format to one ' 'of the following: {0}' . format ( ', ' . join ( valid_archive_formats ) ) )
return ret
try :
archive_format = archive_format . lower ( )
except AttributeError :
pass
if archive_format not in valid_archive_formats :
ret [ 'comment' ] = ( 'Invalid archive_format \'{0}\'. Either set it to a supported ' 'value ({1}) or remove this argument and the archive format will ' 'be guesseed based on file extension.' . format ( archive_format , ', ' . join ( valid_archive_formats ) , ) )
return ret
if options is not None and not isinstance ( options , six . string_types ) :
options = six . text_type ( options )
strip_components = None
if options and archive_format == 'tar' :
try :
strip_components = int ( re . search ( r'''--strip(?:-components)?(?:\s+|=)["']?(\d+)["']?''' , options ) . group ( 1 ) )
except ( AttributeError , ValueError ) :
pass
if archive_format == 'zip' :
if options :
if use_cmd_unzip is None :
log . info ( 'Presence of CLI options in archive.extracted state for ' '\'%s\' implies that use_cmd_unzip is set to True.' , name )
use_cmd_unzip = True
elif not use_cmd_unzip : # use _ cmd _ unzip explicitly disabled
ret [ 'comment' ] = ( '\'use_cmd_unzip\' cannot be set to False if CLI options ' 'are being specified (via the \'options\' argument). ' 'Either remove \'use_cmd_unzip\', or set it to True.' )
return ret
if use_cmd_unzip :
if 'archive.cmd_unzip' not in __salt__ :
ret [ 'comment' ] = ( 'archive.cmd_unzip function not available, unzip might ' 'not be installed on minion' )
return ret
if password :
if use_cmd_unzip is None :
log . info ( 'Presence of a password in archive.extracted state for ' '\'%s\' implies that use_cmd_unzip is set to False.' , name )
use_cmd_unzip = False
elif use_cmd_unzip :
ret . setdefault ( 'warnings' , [ ] ) . append ( 'Using a password in combination with setting ' '\'use_cmd_unzip\' to True is considered insecure. It is ' 'recommended to remove the \'use_cmd_unzip\' argument (or ' 'set it to False) and allow Salt to extract the archive ' 'using Python\'s built-in ZIP file support.' )
else :
if password :
ret [ 'comment' ] = 'The \'password\' argument is only supported for zip archives'
return ret
if archive_format == 'rar' :
if 'archive.unrar' not in __salt__ :
ret [ 'comment' ] = ( 'archive.unrar function not available, rar/unrar might ' 'not be installed on minion' )
return ret
supports_options = ( 'tar' , 'zip' )
if options and archive_format not in supports_options :
ret [ 'comment' ] = ( 'The \'options\' argument is only compatible with the following ' 'archive formats: {0}' . format ( ', ' . join ( supports_options ) ) )
return ret
if trim_output :
if trim_output is True :
trim_output = 100
elif not isinstance ( trim_output , ( bool , six . integer_types ) ) :
try : # Try to handle cases where trim _ output was passed as a
# string - ified integer .
trim_output = int ( trim_output )
except TypeError :
ret [ 'comment' ] = ( 'Invalid value for trim_output, must be True/False or an ' 'integer' )
return ret
if source_hash :
try :
source_sum = __salt__ [ 'file.get_source_sum' ] ( source = source_match , source_hash = source_hash , source_hash_name = source_hash_name , saltenv = __env__ )
except CommandExecutionError as exc :
ret [ 'comment' ] = exc . strerror
return ret
else :
source_sum = { }
if source_is_local :
cached = source_match
else :
if __opts__ [ 'test' ] :
ret [ 'result' ] = None
ret [ 'comment' ] = ( 'Archive {0} would be cached (if necessary) and checked to ' 'discover if extraction is needed' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) ) )
return ret
if 'file.cached' not in __states__ : # Shouldn ' t happen unless there is a traceback keeping
# salt / states / file . py from being processed through the loader . If
# that is the case , we have much more important problems as _ all _
# file states would be unavailable .
ret [ 'comment' ] = ( 'Unable to cache {0}, file.cached state not available' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) ) )
return ret
try :
result = __states__ [ 'file.cached' ] ( source_match , source_hash = source_hash , source_hash_name = source_hash_name , skip_verify = skip_verify , saltenv = __env__ )
except Exception as exc :
msg = 'Failed to cache {0}: {1}' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) , exc . __str__ ( ) )
log . exception ( msg )
ret [ 'comment' ] = msg
return ret
else :
log . debug ( 'file.cached: %s' , result )
if result [ 'result' ] : # Get the path of the file in the minion cache
cached = __salt__ [ 'cp.is_cached' ] ( source_match , saltenv = __env__ )
else :
log . debug ( 'failed to download %s' , salt . utils . url . redact_http_basic_auth ( source_match ) )
return result
existing_cached_source_sum = _read_cached_checksum ( cached )
if source_hash and source_hash_update and not skip_verify : # Create local hash sum file if we ' re going to track sum update
_update_checksum ( cached )
if archive_format == 'zip' and not password :
log . debug ( 'Checking %s to see if it is password-protected' , source_match )
# Either use _ cmd _ unzip was explicitly set to True , or was
# implicitly enabled by setting the " options " argument .
try :
encrypted_zip = __salt__ [ 'archive.is_encrypted' ] ( cached , clean = False , saltenv = __env__ )
except CommandExecutionError : # This would happen if archive _ format = zip and the source archive is
# not actually a zip file .
pass
else :
if encrypted_zip :
ret [ 'comment' ] = ( 'Archive {0} is password-protected, but no password was ' 'specified. Please set the \'password\' argument.' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) ) )
return ret
try :
contents = __salt__ [ 'archive.list' ] ( cached , archive_format = archive_format , options = list_options , strip_components = strip_components , clean = False , verbose = True )
except CommandExecutionError as exc :
contents = None
errors = [ ]
if not if_missing :
errors . append ( '\'if_missing\' must be set' )
if not enforce_ownership_on and ( user or group ) :
errors . append ( 'Ownership cannot be managed without setting ' '\'enforce_ownership_on\'.' )
msg = exc . strerror
if errors :
msg += '\n\n'
if archive_format == 'tar' :
msg += ( 'If the source archive is a tar archive compressed using ' 'a compression type not natively supported by the tar ' 'command, then setting the \'list_options\' argument may ' 'allow the contents to be listed. Otherwise, if Salt is ' 'unable to determine the files/directories in the ' 'archive, the following workaround(s) would need to be ' 'used for this state to proceed' )
else :
msg += ( 'The following workarounds must be used for this state to ' 'proceed' )
msg += ( ' (assuming the source file is a valid {0} archive):\n' . format ( archive_format ) )
for error in errors :
msg += '\n- {0}' . format ( error )
ret [ 'comment' ] = msg
return ret
if enforce_toplevel and contents is not None and ( len ( contents [ 'top_level_dirs' ] ) > 1 or contents [ 'top_level_files' ] ) :
ret [ 'comment' ] = ( 'Archive does not have a single top-level directory. ' 'To allow this archive to be extracted, set ' '\'enforce_toplevel\' to False. To avoid a ' '\'{0}-bomb\' it may also be advisable to set a ' 'top-level directory by adding it to the \'name\' ' 'value (for example, setting \'name\' to {1} ' 'instead of {2}).' . format ( archive_format , os . path . join ( name , 'some_dir' ) , name , ) )
return ret
extraction_needed = overwrite
contents_missing = False
# Check to see if we need to extract the archive . Using os . lstat ( ) in a
# try / except is considerably faster than using os . path . exists ( ) , and we
# already need to catch an OSError to cover edge cases where the minion is
# running as a non - privileged user and is trying to check for the existence
# of a path to which it does not have permission .
try :
if_missing_path_exists = os . path . exists ( if_missing )
except TypeError :
if_missing_path_exists = False
if not if_missing_path_exists :
if contents is None :
try :
os . lstat ( if_missing )
extraction_needed = False
except OSError as exc :
if exc . errno == errno . ENOENT :
extraction_needed = True
else :
ret [ 'comment' ] = ( 'Failed to check for existence of if_missing path ' '({0}): {1}' . format ( if_missing , exc . __str__ ( ) ) )
return ret
else :
incorrect_type = [ ]
for path_list , func in ( ( contents [ 'dirs' ] , stat . S_ISDIR ) , ( contents [ 'files' ] , lambda x : not stat . S_ISLNK ( x ) and not stat . S_ISDIR ( x ) ) , ( contents [ 'links' ] , stat . S_ISLNK ) ) :
for path in path_list :
full_path = salt . utils . path . join ( name , path )
try :
path_mode = os . lstat ( full_path . rstrip ( os . sep ) ) . st_mode
if not func ( path_mode ) :
incorrect_type . append ( path )
except OSError as exc :
if exc . errno == errno . ENOENT :
extraction_needed = True
contents_missing = True
elif exc . errno != errno . ENOTDIR : # In cases where a directory path was occupied by a
# file instead , all os . lstat ( ) calls to files within
# that dir will raise an ENOTDIR OSError . So we
# expect these and will only abort here if the
# error code is something else .
ret [ 'comment' ] = exc . __str__ ( )
return ret
if incorrect_type :
incorrect_paths = '\n\n' + '\n' . join ( [ '- {0}' . format ( x ) for x in incorrect_type ] )
ret [ 'comment' ] = ( 'The below paths (relative to {0}) exist, but are the ' 'incorrect type (file instead of directory, symlink ' 'instead of file, etc.).' . format ( name ) )
if __opts__ [ 'test' ] and clean and contents is not None :
ret [ 'result' ] = None
ret [ 'comment' ] += ( ' Since the \'clean\' option is enabled, the ' 'destination paths would be cleared and the ' 'archive would be extracted.{0}' . format ( incorrect_paths ) )
return ret
# Skip notices of incorrect types if we ' re cleaning
if not ( clean and contents is not None ) :
if not force :
ret [ 'comment' ] += ( ' To proceed with extraction, set \'force\' to ' 'True. Note that this will remove these paths ' 'before extracting.{0}' . format ( incorrect_paths ) )
return ret
else :
errors = [ ]
for path in incorrect_type :
full_path = os . path . join ( name , path )
try :
salt . utils . files . rm_rf ( full_path . rstrip ( os . sep ) )
ret [ 'changes' ] . setdefault ( 'removed' , [ ] ) . append ( full_path )
extraction_needed = True
except OSError as exc :
if exc . errno != errno . ENOENT :
errors . append ( exc . __str__ ( ) )
if errors :
msg = ( 'One or more paths existed by were the incorrect ' 'type (i.e. file instead of directory or ' 'vice-versa), but could not be removed. The ' 'following errors were observed:\n' )
for error in errors :
msg += '\n- {0}' . format ( error )
ret [ 'comment' ] = msg
return ret
if not extraction_needed and source_hash_update and existing_cached_source_sum is not None and not _compare_checksum ( cached , existing_cached_source_sum ) :
extraction_needed = True
source_hash_trigger = True
else :
source_hash_trigger = False
created_destdir = False
if extraction_needed :
if source_is_local and source_hash and not skip_verify :
ret [ 'result' ] = __salt__ [ 'file.check_hash' ] ( source_match , source_sum [ 'hsum' ] )
if not ret [ 'result' ] :
ret [ 'comment' ] = '{0} does not match the desired source_hash {1}' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) , source_sum [ 'hsum' ] )
return ret
if __opts__ [ 'test' ] :
ret [ 'result' ] = None
ret [ 'comment' ] = 'Archive {0} would be extracted to {1}' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) , name )
if clean and contents is not None :
ret [ 'comment' ] += ', after cleaning destination path(s)'
_add_explanation ( ret , source_hash_trigger , contents_missing )
return ret
if clean and contents is not None :
errors = [ ]
log . debug ( 'Cleaning archive paths from within %s' , name )
for path in contents [ 'top_level_dirs' ] + contents [ 'top_level_files' ] :
full_path = os . path . join ( name , path )
try :
log . debug ( 'Removing %s' , full_path )
salt . utils . files . rm_rf ( full_path . rstrip ( os . sep ) )
ret [ 'changes' ] . setdefault ( 'removed' , [ ] ) . append ( full_path )
except OSError as exc :
if exc . errno != errno . ENOENT :
errors . append ( exc . __str__ ( ) )
if errors :
msg = ( 'One or more paths could not be cleaned. The following ' 'errors were observed:\n' )
for error in errors :
msg += '\n- {0}' . format ( error )
ret [ 'comment' ] = msg
return ret
if not os . path . isdir ( name ) :
__states__ [ 'file.directory' ] ( name , user = user , makedirs = True )
created_destdir = True
log . debug ( 'Extracting %s to %s' , cached , name )
try :
if archive_format == 'zip' :
if use_cmd_unzip :
try :
files = __salt__ [ 'archive.cmd_unzip' ] ( cached , name , options = options , trim_output = trim_output , password = password , ** kwargs )
except ( CommandExecutionError , CommandNotFoundError ) as exc :
ret [ 'comment' ] = exc . strerror
return ret
else :
files = __salt__ [ 'archive.unzip' ] ( cached , name , options = options , trim_output = trim_output , password = password , extract_perms = extract_perms , ** kwargs )
elif archive_format == 'rar' :
try :
files = __salt__ [ 'archive.unrar' ] ( cached , name , trim_output = trim_output , ** kwargs )
except ( CommandExecutionError , CommandNotFoundError ) as exc :
ret [ 'comment' ] = exc . strerror
return ret
else :
if options is None :
try :
with closing ( tarfile . open ( cached , 'r' ) ) as tar :
tar . extractall ( salt . utils . stringutils . to_str ( name ) )
files = tar . getnames ( )
if trim_output :
files = files [ : trim_output ]
except tarfile . ReadError :
if salt . utils . path . which ( 'xz' ) :
if __salt__ [ 'cmd.retcode' ] ( [ 'xz' , '-t' , cached ] , python_shell = False , ignore_retcode = True ) == 0 : # XZ - compressed data
log . debug ( 'Tar file is XZ-compressed, attempting ' 'decompression and extraction using XZ Utils ' 'and the tar command' )
# Must use python _ shell = True here because not
# all tar implementations support the - J flag
# for decompressing XZ - compressed data . We need
# to dump the decompressed data to stdout and
# pipe it to tar for extraction .
cmd = 'xz --decompress --stdout {0} | tar xvf -'
results = __salt__ [ 'cmd.run_all' ] ( cmd . format ( _cmd_quote ( cached ) ) , cwd = name , python_shell = True )
if results [ 'retcode' ] != 0 :
if created_destdir :
_cleanup_destdir ( name )
ret [ 'result' ] = False
ret [ 'changes' ] = results
return ret
if _is_bsdtar ( ) :
files = results [ 'stderr' ]
else :
files = results [ 'stdout' ]
else : # Failed to open tar archive and it is not
# XZ - compressed , gracefully fail the state
if created_destdir :
_cleanup_destdir ( name )
ret [ 'result' ] = False
ret [ 'comment' ] = ( 'Failed to read from tar archive using ' 'Python\'s native tar file support. If ' 'archive is compressed using something ' 'other than gzip or bzip2, the ' '\'options\' argument may be required to ' 'pass the correct options to the tar ' 'command in order to extract the archive.' )
return ret
else :
if created_destdir :
_cleanup_destdir ( name )
ret [ 'result' ] = False
ret [ 'comment' ] = ( 'Failed to read from tar archive. If it is ' 'XZ-compressed, install xz-utils to attempt ' 'extraction.' )
return ret
else :
if not salt . utils . path . which ( 'tar' ) :
ret [ 'comment' ] = ( 'tar command not available, it might not be ' 'installed on minion' )
return ret
tar_opts = shlex . split ( options )
tar_cmd = [ 'tar' ]
tar_shortopts = 'x'
tar_longopts = [ ]
for position , opt in enumerate ( tar_opts ) :
if opt . startswith ( '-' ) :
tar_longopts . append ( opt )
else :
if position > 0 :
tar_longopts . append ( opt )
else :
append_opt = opt
append_opt = append_opt . replace ( 'x' , '' )
append_opt = append_opt . replace ( 'f' , '' )
tar_shortopts = tar_shortopts + append_opt
if __grains__ [ 'os' ] . lower ( ) == 'openbsd' :
tar_shortopts = '-' + tar_shortopts
tar_cmd . append ( tar_shortopts )
tar_cmd . extend ( tar_longopts )
tar_cmd . extend ( [ '-f' , cached ] )
results = __salt__ [ 'cmd.run_all' ] ( tar_cmd , cwd = name , python_shell = False )
if results [ 'retcode' ] != 0 :
ret [ 'result' ] = False
ret [ 'changes' ] = results
return ret
if _is_bsdtar ( ) :
files = results [ 'stderr' ]
else :
files = results [ 'stdout' ]
if not files :
files = 'no tar output so far'
except CommandExecutionError as exc :
ret [ 'comment' ] = exc . strerror
return ret
# Recursively set user and group ownership of files
enforce_missing = [ ]
enforce_failed = [ ]
if user or group :
if enforce_ownership_on :
if os . path . isdir ( enforce_ownership_on ) :
enforce_dirs = [ enforce_ownership_on ]
enforce_files = [ ]
enforce_links = [ ]
else :
enforce_dirs = [ ]
enforce_files = [ enforce_ownership_on ]
enforce_links = [ ]
else :
if contents is not None :
enforce_dirs = contents [ 'top_level_dirs' ]
enforce_files = contents [ 'top_level_files' ]
enforce_links = contents [ 'top_level_links' ]
recurse = [ ]
if user :
recurse . append ( 'user' )
if group :
recurse . append ( 'group' )
recurse_str = ', ' . join ( recurse )
owner_changes = dict ( [ ( x , y ) for x , y in ( ( 'user' , user ) , ( 'group' , group ) ) if y ] )
for dirname in enforce_dirs :
full_path = os . path . join ( name , dirname )
if not os . path . isdir ( full_path ) :
if not __opts__ [ 'test' ] :
enforce_missing . append ( full_path )
else :
log . debug ( 'Enforcing %s ownership on %s using a file.directory state%s' , recurse_str , dirname , ' (dry-run only)' if __opts__ [ 'test' ] else '' )
dir_result = __states__ [ 'file.directory' ] ( full_path , user = user , group = group , recurse = recurse )
log . debug ( 'file.directory: %s' , dir_result )
if dir_result . get ( 'changes' ) :
ret [ 'changes' ] [ 'updated ownership' ] = True
try :
if not dir_result [ 'result' ] :
enforce_failed . append ( full_path )
except ( KeyError , TypeError ) :
log . warning ( 'Bad state return %s for file.directory state on %s' , dir_result , dirname )
for filename in enforce_files + enforce_links :
full_path = os . path . join ( name , filename )
try : # Using os . lstat instead of calling out to
# _ _ salt _ _ [ ' file . stats ' ] , since we may be doing this for a lot
# of files , and simply calling os . lstat directly will speed
# things up a bit .
file_stat = os . lstat ( full_path )
except OSError as exc :
if not __opts__ [ 'test' ] :
if exc . errno == errno . ENOENT :
enforce_missing . append ( full_path )
enforce_failed . append ( full_path )
else : # Earlier we set uid , gid to - 1 if we ' re not enforcing
# ownership on user , group , as passing - 1 to os . chown will tell
# it not to change that ownership . Since we ' ve done that , we
# can selectively compare the uid / gid from the values in
# file _ stat , _ only if _ the " desired " uid / gid is something other
# than - 1.
if ( uid != - 1 and uid != file_stat . st_uid ) or ( gid != - 1 and gid != file_stat . st_gid ) :
if __opts__ [ 'test' ] :
ret [ 'changes' ] [ 'updated ownership' ] = True
else :
try :
os . lchown ( full_path , uid , gid )
ret [ 'changes' ] [ 'updated ownership' ] = True
except OSError :
enforce_failed . append ( filename )
if extraction_needed :
if files :
if created_destdir :
ret [ 'changes' ] [ 'directories_created' ] = [ name ]
ret [ 'changes' ] [ 'extracted_files' ] = files
ret [ 'comment' ] = '{0} extracted to {1}' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) , name , )
_add_explanation ( ret , source_hash_trigger , contents_missing )
ret [ 'result' ] = True
else :
ret [ 'result' ] = False
ret [ 'comment' ] = 'No files were extracted from {0}' . format ( salt . utils . url . redact_http_basic_auth ( source_match ) )
else :
ret [ 'result' ] = True
if if_missing_path_exists :
ret [ 'comment' ] = '{0} exists' . format ( if_missing )
else :
ret [ 'comment' ] = 'All files in archive are already present'
if __opts__ [ 'test' ] :
if ret [ 'changes' ] . get ( 'updated ownership' ) :
ret [ 'result' ] = None
ret [ 'comment' ] += ( '. Ownership would be updated on one or more ' 'files/directories.' )
if enforce_missing :
if not if_missing : # If is _ missing was used , and both a ) the archive had never been
# extracted , and b ) the path referred to by if _ missing exists , then
# enforce _ missing would contain paths of top _ level dirs / files that
# _ would _ have been extracted . Since if _ missing can be used as a
# semaphore to conditionally extract , we don ' t want to make this a
# case where the state fails , so we only fail the state if
# is _ missing is not used .
ret [ 'result' ] = False
ret [ 'comment' ] += ( '\n\nWhile trying to enforce user/group ownership, the following ' 'paths were missing:\n' )
for item in enforce_missing :
ret [ 'comment' ] += '\n- {0}' . format ( item )
if enforce_failed :
ret [ 'result' ] = False
ret [ 'comment' ] += ( '\n\nWhile trying to enforce user/group ownership, Salt was ' 'unable to change ownership on the following paths:\n' )
for item in enforce_failed :
ret [ 'comment' ] += '\n- {0}' . format ( item )
if not source_is_local :
if keep_source :
log . debug ( 'Keeping cached source file %s' , cached )
else :
log . debug ( 'Cleaning cached source file %s' , cached )
result = __states__ [ 'file.not_cached' ] ( source_match , saltenv = __env__ )
if not result [ 'result' ] : # Don ' t let failure to delete cached file cause the state
# itself to fail , just drop it in the warnings .
ret . setdefault ( 'warnings' , [ ] ) . append ( result [ 'comment' ] )
return ret |
def changes(self, adding=None, deleting=None):
    """Pass the given changes to the root_node.

    Removals are applied before additions; afterwards every conflict-set
    node reports its activation deltas, which are aggregated and returned.

    Args:
        adding: Iterable of items to add to the root node, or None.
        deleting: Iterable of items to remove from the root node, or None.

    Returns:
        tuple: ``(added, removed)`` activation lists collected from all
        conflict-set nodes.
    """
    if deleting is not None:
        for doomed in deleting:
            self.root_node.remove(doomed)
    if adding is not None:
        for fresh in adding:
            self.root_node.add(fresh)
    # Aggregate activation changes across every conflict-set node.
    activations_added = []
    activations_removed = []
    for node in self._get_conflict_set_nodes():
        new_items, gone_items = node.get_activations()
        activations_added += new_items
        activations_removed += gone_items
    return (activations_added, activations_removed)
def nla_for_each_attr(head, len_, rem):
    """Iterate over a stream of attributes.

    https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/attr.h#L262

    Positional arguments:
    head -- first nlattr with more in its bytearray payload (nlattr class instance).
    len_ -- length of attribute stream (integer).
    rem -- initialized to len_, holds bytes currently remaining in stream (c_int).

    Returns:
    Generator yielding nlattr instances.
    """
    # Seed the remaining-byte counter, then walk the stream one attribute
    # at a time until nla_ok reports the next candidate is invalid/exhausted.
    rem.value = len_
    current = head
    while True:
        if not nla_ok(current, rem):
            return
        yield current
        current = nla_next(current, rem)
def docelement(self):
    """Returns the instance of the element whose body owns the docstring
    in the current operation.
    """
    # This is needed since the decorating documentation for types and
    # executables is in the body of the module, but when they get edited,
    # the edit belongs to the type/executable because the character
    # falls within the absstart and end attributes.
    if self._docelement is None:
        # Lazily resolve the owning element and cache it for later calls.
        if isinstance(self.element, Module):
            # Module-level documentation is owned by the module itself.
            self._docelement = self.element
        else:
            # Convert the cached position into an absolute character index.
            # NOTE(review): assumes self.icached[0] is a line number that
            # module.charindex() accepts with column 1 -- confirm against
            # the charindex() API.
            ichar = self.element.module.charindex(self.icached[0], 1)
            if (ichar > self.element.docstart and ichar <= self.element.docend):
                # The index falls inside this element's docstring span, so
                # the documentation belongs to the parent element.
                self._docelement = self.element.parent
            else:
                self._docelement = self.element
    return self._docelement
def add_case(self, case, update=False):
    """Add a case to the case collection.

    If the case already exists and ``update`` is False, a CaseError is
    raised; if ``update`` is True the existing document is replaced.

    Args:
        case (dict): A case dictionary; must contain a 'case_id' key
        update (bool): If an existing case should be updated

    Returns:
        dict: The ``case`` dictionary that was inserted or used as the
        replacement (note: not the Mongo ObjectId).
    """
    # NOTE(review): self.case() presumably looks up an existing case
    # document for this case dict -- confirm its matching semantics.
    existing_case = self.case(case)
    if existing_case and not update:
        raise CaseError("Case {} already exists".format(case['case_id']))
    if existing_case:
        # Replace the whole existing document in place.
        self.db.case.find_one_and_replace({'case_id': case['case_id']}, case, )
    else:
        self.db.case.insert_one(case)
    return case
def freeze_bn(self):
    '''Freeze all BatchNorm2d layers by switching them to eval mode,
    so running statistics are no longer updated during training.'''
    batchnorms = (m for m in self.modules() if isinstance(m, nn.BatchNorm2d))
    for bn in batchnorms:
        bn.eval()
def update(cls, cluster_id_label, cluster_info):
    """Update the cluster with id/label `cluster_id_label` using information
    provided in `cluster_info`.

    Issues a PUT against the v2 cluster API and returns the decoded response.
    """
    conn = Qubole.agent(version="v2")
    return conn.put(cls.element_path(cluster_id_label), data=cluster_info)
def termination_check(self):
    # type: (Uploader) -> bool
    """Check if the uploader should terminate.

    :param Uploader self: this
    :rtype: bool
    :return: True when termination was requested, an exception was recorded,
        or all files are processed and both work sets have drained.
    """
    with self._upload_lock, self._transfer_lock:
        if self._upload_terminate:
            return True
        if len(self._exceptions) > 0:
            return True
        return (self._all_files_processed
                and len(self._upload_set) == 0
                and len(self._transfer_set) == 0)
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary.

    Each line of the file is one token; its (0-based) line number becomes
    its index. Lines are stripped of surrounding whitespace.

    Args:
        vocab_file: path to the vocabulary text file.

    Returns:
        collections.OrderedDict mapping token (str) to index (int),
        preserving file order.
    """
    vocab = collections.OrderedDict()
    # Read as UTF-8 explicitly: vocab files (e.g. BERT vocabularies) are
    # UTF-8, and the platform default encoding is not reliable.
    with io.open(vocab_file, 'r', encoding='utf-8') as reader:
        for index, line in enumerate(reader):
            vocab[line.strip()] = index
    return vocab
def to_twos_comp(val, bits):
    """Compute the two's complement of the integer-valued string *val*.

    Note: despite the name, *val* is a string (it is tested with
    .startswith('-')), not an int. Non-negative values are simply
    converted with the project helper to_int; negative values are
    encoded as bits-wide two's complement via invert-and-add-one.

    # NOTE(review): semantics of to_int/_invert/to_bin_str_from_int_string
    # are defined elsewhere in this project — confirm width handling there.
    """
    if not val.startswith('-'):
        return to_int(val)
    # invert the bits-wide binary representation of |val| ...
    value = _invert(to_bin_str_from_int_string(bits, bin(to_int(val[1:]))))
    # ... then add one to complete the two's complement.
    return int(value, 2) + 1
def convert_upload_string_to_file(i):
    """Decode a base64-encoded upload payload and write it to a file.

    Input:  {
              file_content_base64 - string transmitted through Internet
              (filename)          - file name to write (if empty, generate tmp file)
            }

    Output: {
              return       - return code = 0 if successful, > 0 if error
              (error)      - error text if return > 0
              filename     - filename with full path
              filename_ext - filename extension
            }
    """
    import base64
    x = i['file_content_base64']
    # urlsafe variant: payload may use '-'/'_' in place of '+'/'/'
    fc = base64.urlsafe_b64decode(str(x))
    # convert from unicode to str since base64 works on strings
    # should be safe in Python 2.x and 3.x
    fn = i.get('filename', '')
    if fn == '':
        # no name supplied - ask the project helper for a temp file name
        rx = gen_tmp_file({'prefix': 'tmp-'})
        if rx['return'] > 0:
            return rx
        px = rx['file_name']
    else:
        px = fn
    fn1, fne = os.path.splitext(px)
    if os.path.isfile(px):
        # refuse to overwrite an existing file
        return {'return': 1, 'error': 'file already exists in the current directory'}
    try:
        fx = open(px, 'wb')
        fx.write(fc)
        fx.close()
    except Exception as e:
        return {'return': 1, 'error': 'problem writing file=' + px + ' (' + format(e) + ')'}
    return {'return': 0, 'filename': px, 'filename_ext': fne}
def relation_column(instance, fields):
    '''Resolve a two-part relationship attribute on an ORM instance,
    e.g. fields = ['user', 'username'] -> instance.user.username,
    or ['replies', 'content'] -> first reply's content.

    Returns '' when the related object is absent.
    '''
    # SQLAlchemy relationship property for the first path segment.
    relation = getattr(instance.__class__, fields[0]).property
    _field = getattr(instance, fields[0])
    if relation.lazy == 'dynamic':
        # dynamic relationships yield a query, not an object - take the first row
        _field = _field.first()
    return getattr(_field, fields[1]) if _field else ''
def get_image_path(name, default="not_found.png"):
    """Return the absolute path of image *name*, searching the module-level
    IMG_PATH directories in order.

    If the image is not found and *default* is not None, return the absolute
    path of *default* inside spyder's images directory; if *default* is None,
    return None implicitly.
    """
    for img_path in IMG_PATH:
        full_path = osp.join(img_path, name)
        if osp.isfile(full_path):
            return osp.abspath(full_path)
    if default is not None:
        img_path = osp.join(get_module_path('spyder'), 'images')
        return osp.abspath(osp.join(img_path, default))
def handle(self, *args, **options):
    """Main command method.

    Imports mailing-list email archives under a lock file so only one
    instance runs at a time, then refreshes the privacy flag of every
    MailingList. The lock file is always removed, even on failure.
    """
    # Already running, so quit
    if os.path.exists(self.lock_file):
        self.log(("This script is already running. "
                  "(If your are sure it's not please "
                  "delete the lock file in {}')").format(self.lock_file))
        sys.exit(0)
    if not os.path.exists(os.path.dirname(self.lock_file)):
        # 0o755 spelling is valid on Python 2.6+ and required on Python 3
        # (the old 0755 literal is a SyntaxError there).
        os.mkdir(os.path.dirname(self.lock_file), 0o755)
    archives_path = options.get('archives_path')
    # Log the path actually used (previously this logged
    # settings.SUPER_ARCHIVES_PATH, which may differ from the option).
    self.log('Using archives_path `%s`' % archives_path)
    if not os.path.exists(archives_path):
        msg = 'archives_path ({}) does not exist'.format(archives_path)
        raise CommandError(msg)
    # Create the lock file with open(); the Python-2-only file() builtin
    # does not exist on Python 3.
    open(self.lock_file, 'w').close()
    try:
        self.import_emails(archives_path, options.get('all'),
                           options.get('exclude_lists'),)
    except Exception as e:
        logging.exception(e)
        raise
    finally:
        # Always release the lock, even when import_emails fails.
        os.remove(self.lock_file)
    for mlist in MailingList.objects.all():
        mlist.update_privacy()
        mlist.save()
def dataSource(self, value):
    """Set the datasource object.

    Raises:
        TypeError: if *value* is not a DataSource instance.
    """
    if not isinstance(value, DataSource):
        raise TypeError("value must be a DataSource object")
    self._dataSource = value
def start(self):
    """Begin listening for events from the Client and acting upon them.

    Note: If configuration has not already been loaded, it will be loaded
    immediately before starting to listen for events. Calling this method
    without having specified and/or loaded a configuration will result in
    completely default values being used.

    After all modules for this controller are loaded, the STARTUP event
    will be dispatched.
    """
    # Lazy-load the config only when a path was given and nothing is loaded yet.
    if not self.config and self.config_path is not None:
        self.load_config()
    self.running = True
    self.process_event("STARTUP", self.client, ())
def _invade_isolated_Ts(self):
    r"""Throats that are uninvaded connected to pores that are both invaded
    should be invaded too.

    Such "isolated" throats inherit the invasion pressure, sequence and
    cluster of whichever of their two end pores was invaded later.
    """
    net = self.project.network
    # Copy so the mutation of max_array below never touches network data.
    Ts = net['throat.conns'].copy()
    invaded_Ps = self['pore.invasion_sequence'] > -1
    uninvaded_Ts = self['throat.invasion_sequence'] == -1
    # Throats whose BOTH end pores are invaded ...
    isolated_Ts = np.logical_and(invaded_Ps[Ts[:, 0]], invaded_Ps[Ts[:, 1]])
    # ... but which are themselves still uninvaded.
    isolated_Ts = np.logical_and(isolated_Ts, uninvaded_Ts)
    inv_Pc = self['pore.invasion_pressure']
    inv_seq = self['pore.invasion_sequence']
    if np.any(isolated_Ts):
        # For each throat pick the end pore with the LATER invasion sequence.
        max_array = Ts[:, 0]
        second_higher = inv_seq[Ts][:, 1] > inv_seq[Ts][:, 0]
        max_array[second_higher] = Ts[:, 1][second_higher]
        mPc = inv_Pc[max_array]
        mSeq = inv_seq[max_array]
        mClu = self['pore.cluster'][max_array]
        # Propagate the later pore's state onto the isolated throats only.
        self['throat.invasion_pressure'][isolated_Ts] = mPc[isolated_Ts]
        self['throat.invasion_sequence'][isolated_Ts] = mSeq[isolated_Ts]
        self['throat.cluster'][isolated_Ts] = mClu[isolated_Ts]
def convert_multiple_sources_to_consumable_types(self, project, prop_set, sources):
    """Convert several files to consumable types.

    Each source whose type is not (derived from) one of this generator's
    source types is run through construct_types; converted targets whose
    type matches are collected. Sources already of an acceptable type are
    passed through unchanged. The result list is de-duplicated, order
    preserved.
    """
    if __debug__:
        from .targets import ProjectTarget
        assert isinstance(project, ProjectTarget)
        assert isinstance(prop_set, property_set.PropertySet)
        assert is_iterable_typed(sources, virtual_target.VirtualTarget)
    if not self.source_types_:
        # No declared source types: everything is consumable as-is.
        return list(sources)
    acceptable_types = set()
    for t in self.source_types_:
        acceptable_types.update(type.all_derived(t))
    result = []
    for source in sources:
        if source.type() not in acceptable_types:
            transformed = construct_types(project, None, self.source_types_, prop_set, [source])
            # construct_types returns [prop_set, [targets]]
            for t in transformed[1]:
                if t.type() in self.source_types_:
                    result.append(t)
            # NOTE(review): given construct_types returns a two-element
            # list, `not transformed` looks always false here, so this
            # failure log appears unreachable - confirm upstream contract.
            if not transformed:
                project.manager().logger().log(__name__, " failed to convert ", source)
        else:
            result.append(source)
    result = sequence.unique(result, stable=True)
    return result
def check_for_update(self, force=True, download=False):
    """Return a :class:`~plexapi.base.Release` object containing release info,
    or None when no release entry is reported.

    Parameters:
        force (bool): Force server to check for new releases
        download (bool): Download if an update is available.
    """
    part = '/updater/check?download=%s' % (1 if download else 0)
    if force:
        # Trigger a fresh check on the server before reading status.
        self.query(part, method=self._session.put)
    releases = self.fetchItems('/updater/status')
    if len(releases):
        return releases[0]
def save_anndata(self, fname, data='adata_raw', **kwargs):
    """Save the AnnData object named by *data* to a .h5ad file
    (AnnData's native file format).

    Parameters
    ----------
    fname : str
        The filename of the output file.
    data : str
        Name of the instance attribute holding the AnnData object.
    """
    adata = self.__dict__[data]
    adata.write_h5ad(fname, **kwargs)
def notify_scale_factor_change(self, screen_id, u32_scale_factor_w_multiplied, u32_scale_factor_h_multiplied):
    """Notify OpenGL HGCM host service about graphics content scaling factor change.

    in screen_id of type int
    in u32_scale_factor_w_multiplied of type int
    in u32_scale_factor_h_multiplied of type int

    Raises:
        TypeError: if any argument is not a baseinteger.
    """
    # Validate each argument eagerly so the managed call never sees bad types.
    if not isinstance(screen_id, baseinteger):
        raise TypeError("screen_id can only be an instance of type baseinteger")
    if not isinstance(u32_scale_factor_w_multiplied, baseinteger):
        raise TypeError("u32_scale_factor_w_multiplied can only be an instance of type baseinteger")
    if not isinstance(u32_scale_factor_h_multiplied, baseinteger):
        raise TypeError("u32_scale_factor_h_multiplied can only be an instance of type baseinteger")
    self._call("notifyScaleFactorChange", in_p=[screen_id, u32_scale_factor_w_multiplied, u32_scale_factor_h_multiplied])
def check_retcode(self, line):
    """Look for retcode on *line* and return the return code if found.

    :param line: Line to search from
    :return: integer return code, or -1 if "cmd tasklet init" is found.
        None if neither a retcode nor cmd tasklet init is present.
    """
    code_match = re.search(r"retcode\: ([-\d]{1,})", line)
    code = num(str(code_match.group(1))) if code_match else None
    if re.search("cmd tasklet init", line):
        self.logger.debug("Device Boot up", extra={'type': ' '})
        return -1
    return code
def display(self, ret, indent, out, rows_key=None, labels_key=None):
    '''Display table(s).

    Recursively walks *ret* looking for list/tuple row data. When
    rows_key is given, only subtrees under that key are rendered;
    labels_key, if given, names a sibling key holding column labels.
    Rendered lines are appended to *out*, which is also returned.
    '''
    rows = []
    labels = None
    if isinstance(ret, dict):
        if not rows_key or (rows_key and rows_key in list(ret.keys())):
            # either not looking for a specific key
            # either looking and found in the current root
            for key in sorted(ret):
                if rows_key and key != rows_key:
                    continue
                # if searching specifics, ignore anything else
                val = ret[key]
                if not rows_key:
                    # Print a section header only in "show everything" mode.
                    out.append(self.ustring(indent, self.DARK_GRAY,  # pylint: disable=no-member
                                            key, suffix=':'))
                    out.append(self.ustring(indent, self.DARK_GRAY,  # pylint: disable=no-member
                                            '----------'))
                if isinstance(val, (list, tuple)):
                    rows = val
                    if labels_key:
                        # at the same depth
                        labels = ret.get(labels_key)
                        # if any
                    out.extend(self.display_rows(rows, labels, indent))
                else:
                    # Recurse deeper with increased indentation.
                    self.display(val, indent + 4, out, rows_key=rows_key, labels_key=labels_key)
        elif rows_key:
            # dig deeper
            for key in sorted(ret):
                val = ret[key]
                self.display(val, indent, out, rows_key=rows_key, labels_key=labels_key)
                # same indent
    elif isinstance(ret, (list, tuple)):
        if not rows_key:
            rows = ret
            out.extend(self.display_rows(rows, labels, indent))
    return out
def start(self, name):
    '''End the current behaviour and run a named behaviour.

    :param name: the name of the behaviour to run
    :type name: str
    :returns: a human-readable status string.
    '''
    response = self.boatd.post({'active': name}, endpoint='/behaviours')
    active = response.get('active')
    if active is None:
        return 'no behaviour running'
    return 'started {}'.format(active)
def min_date(self, symbol):
    """Return the minimum datetime stored for a particular symbol.

    Parameters
    ----------
    symbol : `str`
        symbol name for the item

    Raises
    ------
    NoDataFoundException
        if no document exists for *symbol*.
    """
    # Sort ascending on START and take the first document; project only START.
    res = self._collection.find_one({SYMBOL: symbol},
                                    projection={ID: 0, START: 1},
                                    sort=[(START, pymongo.ASCENDING)])
    if res is None:
        raise NoDataFoundException("No Data found for {}".format(symbol))
    return utc_dt_to_local_dt(res[START])
def select_template(template_name_list, using=None):
    """Load and return a template for one of the given names.

    Tries names in order and returns the first template found.

    Raises TemplateDoesNotExist if no such template exists, and TypeError
    when a bare string (rather than an iterable of names) is passed.
    """
    if isinstance(template_name_list, six.string_types):
        raise TypeError('select_template() takes an iterable of template names but got a '
                        'string: %r. Use get_template() if you want to load a single '
                        'template by name.' % template_name_list)
    engines = _engine_list(using)
    for template_name in template_name_list:
        for engine in engines:
            try:
                return engine.get_template(template_name)
            except TemplateDoesNotExist as e:
                # Try the next engine/name; the exception itself is unused.
                pass
    if template_name_list:
        raise TemplateDoesNotExist(', '.join(template_name_list))
    else:
        raise TemplateDoesNotExist("No template names provided")
def get_comic_format(filename):
    """Return the comic format if *filename* is a comic archive,
    otherwise None."""
    extension = os.path.splitext(filename)[-1].lower()
    if extension not in _COMIC_EXTS:
        return None
    if zipfile.is_zipfile(filename):
        return _CBZ_FORMAT
    if rarfile.is_rarfile(filename):
        return _CBR_FORMAT
    return None
def get_lattice_vector_equivalence(point_symmetry):
    """Return (b == c, c == a, a == b).

    A rotation column that maps lattice axis j onto a different axis k
    (i.e. |r|[:, j] is the unit vector along k) proves axes j and k are
    equivalent; the corresponding flag index is 3 - j - k.
    """
    # primitive_vectors: column vectors
    flags = [False, False, False]
    eye = np.eye(3)
    for rot in point_symmetry:
        mags = np.abs(rot)
        for col in range(3):
            axis = int(np.argmax(mags[:, col]))
            if axis != col and (mags[:, col] == eye[axis]).all():
                flags[3 - col - axis] = True
    return flags
def info(self, msg: str) -> None:
    """Write an info message to the Windows Application log
    (and, when debugging, to the Python disk log)."""
    # noinspection PyUnresolvedReferences
    s = "{}: {}".format(self.fullname, msg)
    servicemanager.LogInfoMsg(s)
    if self.debugging:
        log.info(s)
def predict_is(self, h=5, fit_once=True):
    """Make dynamic in-sample predictions with the estimated model.

    Parameters
    ----------
    h : int (default: 5)
        How many steps would you like to forecast?
    fit_once : boolean (default: True)
        Fits only once before the in-sample prediction; if False, fits
        after every new datapoint.
        NOTE: this flag is currently ignored - the model is re-fit for
        every step regardless (see loop below).

    Returns
    -------
    pd.DataFrame with predicted values
    """
    predictions = []
    for t in range(0, h):
        # Re-fit on data truncated t steps before the end, then predict 1 ahead.
        x = NLLEV(family=self.family, integ=self.integ, data=self.data_original[:(-h + t)])
        x.fit(print_progress=False)
        if t == 0:
            predictions = x.predict(h=1)
        else:
            predictions = pd.concat([predictions, x.predict(h=1)])
    predictions.rename(columns={0: self.data_name}, inplace=True)
    predictions.index = self.index[-h:]
    return predictions
def _log(self, num=None, format=None):
    '''Helper function to fetch the git log.

    :param num: Number of entries
    :param format: Use formatted output with specified format string
    '''
    # Build the optional flags (empty string when unset). Local names
    # avoid reusing the parameter that shadows the builtin `format`.
    count_flag = '-n %s' % (num) if num else ''
    format_flag = '--format="%s"' % (format) if format else ''
    git_cmd = 'git log %s %s' % (count_flag, format_flag)
    return self.m('getting git log',
                  cmdd=dict(cmd=git_cmd, cwd=self.local),
                  verbose=False)
def _UpdateSudoer ( self , user , sudoer = False ) :
"""Update sudoer group membership for a Linux user account .
Args :
user : string , the name of the Linux user account .
sudoer : bool , True if the user should be a sudoer .
Returns :
bool , True if user update succeeded .""" | if sudoer :
self . logger . info ( 'Adding user %s to the Google sudoers group.' , user )
command = self . gpasswd_add_cmd . format ( user = user , group = self . google_sudoers_group )
else :
self . logger . info ( 'Removing user %s from the Google sudoers group.' , user )
command = self . gpasswd_remove_cmd . format ( user = user , group = self . google_sudoers_group )
try :
subprocess . check_call ( command . split ( ' ' ) )
except subprocess . CalledProcessError as e :
self . logger . warning ( 'Could not update user %s. %s.' , user , str ( e ) )
return False
else :
self . logger . debug ( 'Removed user %s from the Google sudoers group.' , user )
return True |
def MaximumLikelihood(self):
    """Return the value with the highest probability.

    Ties on probability are broken in favour of the larger value
    (tuple comparison on (prob, val)).
    """
    pairs = [(prob, val) for val, prob in self.Items()]
    _, best_val = max(pairs)
    return best_val
def _handler(self, conn):
    """Connection handler thread. Takes care of communication with the
    client and running the proper task or applying a signal.

    Decodes one request from *conn*, dispatches to the registered task,
    and always sends back either ['success', result] or
    ['error', exc_name, exc_args] before closing the connection.
    """
    incoming = self.recv(conn)
    self.log(DEBUG, incoming)
    try:
        # E.g. ['twister', [7, 'invert'], {'guess_type': True}]
        # NOTE(review): if decode() itself raises, the except block below
        # references `task` before assignment (NameError) - confirm.
        task, args, kw = self.codec.decode(incoming)
        # OK, so we've received the information. Now to use it.
        self.log(INFO, 'Fulfilling task %r' % task)
        self.started_task()
        pass_backend = False
        obj = self.tasks[task]
        if _is_iter(obj):
            # (callable, bool)
            obj, pass_backend = obj
        if pass_backend:
            # Have to do this, since args is a list
            args = [self] + args
        # Get and package the result
        res = ['success', obj(*args, **kw)]
    except Exception as e:
        self.log(ERROR, 'Error while fullfilling task %r: %r' % (task, e))
        res = ['error', e.__class__.__name__, e.args]
        if self.tracebacks:
            show_err()
    else:
        self.log(INFO, 'Finished fulfilling task %r' % task)
    finally:
        # Reply (success or error), then release bookkeeping and the socket.
        self.send(conn, self.codec.encode(res))
        self.finished_task()
        conn.close()
def lookup_ids(self, keys):
    """Look up the integer ID associated with each (namespace, key) in the
    *keys* list.

    Returns a dict mapping every (namespace, key) to its id, or None when
    the pair is not present in gauged_keys. Queries are batched in groups
    of self.bulk_insert pairs; values are passed as bind parameters, so
    the repeated-placeholder SQL below is injection-safe.
    """
    keys_len = len(keys)
    ids = {namespace_key: None for namespace_key in keys}
    start = 0
    bulk_insert = self.bulk_insert
    query = 'SELECT namespace, key, id FROM gauged_keys WHERE '
    check = '(namespace = %s AND key = %s) '
    cursor = self.cursor
    execute = cursor.execute
    while start < keys_len:
        rows = keys[start:start + bulk_insert]
        # Flatten [(ns, key), ...] into [ns, key, ns, key, ...] for binding.
        params = [param for params in rows for param in params]
        # One OR-joined placeholder group per pair in this batch.
        id_query = query + (check + ' OR ') * (len(rows) - 1) + check
        execute(id_query, params)
        for namespace, key, id_ in cursor:
            ids[(namespace, key)] = id_
        start += bulk_insert
    return ids
def ecliptic_xyz(self, epoch=None):
    """Compute J2000 ecliptic position vector (x, y, z).

    If you instead want the coordinates referenced to the dynamical
    system defined by the Earth's true equator and equinox, provide
    an epoch time (a Time object, a float TT, or the string 'date').
    """
    if epoch is None:
        # Fixed J2000 ecliptic frame: a single rotation of the position.
        vector = _ECLIPJ2000.dot(self.position.au)
        return Distance(vector)
    position_au = self.position.au
    if isinstance(epoch, Time):
        pass
    elif isinstance(epoch, float):
        epoch = Time(None, tt=epoch)
    elif epoch == 'date':
        epoch = self.t
    else:
        raise ValueError('the epoch= must be a Time object,'
                         ' a floating point Terrestrial Time (TT),'
                         ' or the string "date" for epoch-of-date')
    # Mean obliquity of date feeds the rotation onto the ecliptic of date.
    oblm, oblt, eqeq, psi, eps = epoch._earth_tilt
    e = oblt * DEG2RAD
    rotation = einsum('ij...,jk...->ik...', rot_x(-e), epoch.M)
    position_au = einsum('ij...,j...->i...', rotation, position_au)
    return Distance(position_au)
def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data):
    '''Register for an event notification.

    @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to.
    @param i_event_type: the desired event to which we want to listen.
    @param f_callback: the function to call when i_event_type occurs.
    @param user_data: user provided data to carry with the event.
    @return: 0 on success, ENOMEM on error.
    '''
    # Resolve (and cache) the C function on first use; subsequent calls
    # reuse the already-bound _Cfunctions entry.
    f = _Cfunctions.get('libvlc_event_attach', None) or \
        _Cfunction('libvlc_event_attach',
                   ((1,), (1,), (1,), (1,),), None,
                   ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, user_data)
def startAlertListener(self, callback=None):
    """Create a websocket connection to the Plex Server to optionally receive
    notifications. These often include messages from Plex about media scans
    as well as updates to currently running Transcode Sessions.

    NOTE: You need websocket-client installed in order to use this feature.
    >> pip install websocket-client

    Parameters:
        callback (func): Callback function to call on received messages.

    Raises:
        :class:`plexapi.exception.Unsupported`: Websocket-client not installed.
    """
    notifier = AlertListener(self, callback)
    notifier.start()
    return notifier
def obfuscatable_variable(tokens, index, ignore_length=False):
    """Given a list of *tokens* and an *index* (representing the current
    position), return the token string if it is a variable name that can be
    safely obfuscated.

    Returns '__skipline__' if the rest of the tokens on this line should be
    skipped. Returns '__skipnext__' if the next token should be skipped.

    If *ignore_length* is ``True``, even variables that are already a single
    character will be obfuscated (typically only used with the ``--nonlatin``
    option).
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    line = tok[4]
    if index > 0:
        prev_tok = tokens[index - 1]
    else:
        # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_type = prev_tok[0]
    prev_tok_string = prev_tok[1]
    try:
        next_tok = tokens[index + 1]
    except IndexError:
        # Pretend it's a newline
        next_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    next_tok_string = next_tok[1]
    if token_string == "=":
        return '__skipline__'
    if token_type != tokenize.NAME:
        # Skip this token
        return None
    # Dunder-style names are left alone.
    if token_string.startswith('__'):
        return None
    if next_tok_string == ".":
        # Attribute access on a known imported module - don't rename modules.
        if token_string in imported_modules:
            return None
    if prev_tok_string == 'import':
        return '__skipline__'
    if prev_tok_string == ".":
        return '__skipnext__'
    if prev_tok_string == "for":
        # Loop variables are renameable if longer than two characters.
        if len(token_string) > 2:
            return token_string
    if token_string == "for":
        return None
    if token_string in keyword_args.keys():
        return None
    if token_string in ["def", "class", 'if', 'elif', 'import']:
        return '__skipline__'
    # Only rename names that start a line (after INDENT) or are assigned to.
    if prev_tok_type != tokenize.INDENT and next_tok_string != '=':
        return '__skipline__'
    if not ignore_length:
        if len(token_string) < 3:
            return None
    if token_string in RESERVED_WORDS:
        return None
    return token_string
def fixchars(self, text):
    """Find and replace problematic characters, flagging the instance as
    modified when anything changed."""
    translation = str.maketrans(''.join(Config.CHARFIXES.keys()),
                                ''.join(Config.CHARFIXES.values()))
    cleaned = text.translate(translation)
    if cleaned != text:
        self.modified = True
    return cleaned
def get_interface_detail_output_interface_ifHCInOctets(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML payload for get-interface-detail with the
    ifHCInOctets counter and hands it to the configured callback.
    Consumes kwargs: interface_type, interface_name, ifHCInOctets,
    and optionally callback.
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    # The RPC element itself becomes the payload root.
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    ifHCInOctets = ET.SubElement(interface, "ifHCInOctets")
    ifHCInOctets.text = kwargs.pop('ifHCInOctets')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def report_parse(self):
    """If the pipeline has previously been run on these data, instead of
    reading through the results, parse the report instead.

    For MLST runs, a per-genus report is attempted first, falling back to
    the generic report. Samples missing from the report get an 'empty'
    GenObject with ND/zero placeholder attributes.
    """
    # Initialise lists
    report_strains = list()
    genus_list = list()
    if self.analysistype == 'mlst':
        for sample in self.runmetadata.samples:
            try:
                genus_list.append(sample.general.referencegenus)
            except AttributeError:
                # No genus recorded - mark as not determined.
                sample.general.referencegenus = 'ND'
                genus_list.append(sample.general.referencegenus)
    # Read in the report
    if self.analysistype == 'mlst':
        for genus in genus_list:
            try:
                report_name = os.path.join(self.reportpath,
                                           '{at}_{genus}.csv'.format(at=self.analysistype, genus=genus))
                report_strains = self.report_read(report_strains=report_strains, report_name=report_name)
            except FileNotFoundError:
                # Genus-specific report missing - fall back to the generic one.
                report_name = self.report
                report_strains = self.report_read(report_strains=report_strains, report_name=report_name)
    else:
        report_name = self.report
        report_strains = self.report_read(report_strains=report_strains, report_name=report_name)
    # Populate strains not in the report with 'empty' GenObject with appropriate attributes
    for sample in self.runmetadata.samples:
        if sample.name not in report_strains:
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].sequencetype = 'ND'
            sample[self.analysistype].matches = 0
            sample[self.analysistype].results = dict()
def get_errors(audit_results):
    """Extract the list of errors from audit results.

    Args:
        audit_results: results of `AxsAudit.do_audit()`, or a falsy value.

    Returns:
        A (possibly empty) list of errors; always a fresh list.
    """
    if not audit_results:
        return []
    found = audit_results.errors
    return list(found) if found else []
def sort_dictionary_list(dict_list, sort_key):
    """Sort a list of dictionaries in place by the value under *sort_key*
    and return the same list.

    dict_list -- a list of dictionaries
    sort_key -- a string that identifies the key to sort the dictionaries with.

    Test sorting a list of dictionaries:
    >>> sort_dictionary_list([{'b': 1, 'value': 2}, {'c': 2, 'value': 3}, {'a': 3, 'value': 1}], 'value')
    [{'a': 3, 'value': 1}, {'b': 1, 'value': 2}, {'c': 2, 'value': 3}]
    """
    if not dict_list:
        # Empty or None input is returned unchanged.
        return dict_list
    dict_list.sort(key=lambda record: record[sort_key])
    return dict_list
def name(self, value):
    """Set the name and derive the Site's slug (for file paths, URLs, etc.)
    and its document root."""
    self._name = value
    # Collapse any run of non [0-9a-zA-Z_-] characters into one underscore.
    self.slug = re.sub('[^0-9a-zA-Z_-]+', '_', str(value).lower())
    http_root = _cfg.get('Paths', 'HttpRoot')
    self.root = os.path.abspath(os.path.join(http_root, self.domain.name, self.slug))
def get(self, id):
    """Return the :class:`~plexapi.settings.Setting` object with the
    specified id.

    Raises NotFound when no setting with that id exists.
    """
    key = utils.lowerFirst(id)
    settings_map = self._settings
    if key not in settings_map:
        raise NotFound('Invalid setting id: %s' % key)
    return settings_map[key]
# Lookup tables are built once at import time; the original rebuilt both
# dictionaries on every word of every call.
_CONTRACTIONS_EXPANSIONS = {
    "ain't": "is not", "aren't": "are not", "can't": "can not",
    "could've": "could have", "couldn't": "could not", "didn't": "did not",
    "doesn't": "does not", "don't": "do not", "gonna": "going to",
    "gotta": "got to", "hadn't": "had not", "hasn't": "has not",
    "haven't": "have not", "he'd": "he would", "he'll": "he will",
    "he's": "he is", "how'd": "how did", "how'll": "how will",
    "how's": "how is", "I'd": "I would", "I'll": "I will", "I'm": "I am",
    "I've": "I have", "isn't": "is not", "it'd": "it would",
    "it'll": "it will", "it's": "it is", "mightn't": "might not",
    "might've": "might have", "mustn't": "must not", "must've": "must have",
    "needn't": "need not", "oughtn't": "ought not", "shan't": "shall not",
    "she'd": "she would", "she'll": "she will", "she's": "she is",
    "shouldn't": "should not", "should've": "should have",
    "somebody's": "somebody is", "someone'd": "someone would",
    "someone'll": "someone will", "someone's": "someone is",
    "that'll": "that will", "that's": "that is", "that'd": "that would",
    "there'd": "there would", "there're": "there are", "there's": "there is",
    "they'd": "they would", "they'll": "they will", "they're": "they are",
    "they've": "they have", "wasn't": "was not", "we'd": "we would",
    "we'll": "we will", "we're": "we are", "we've": "we have",
    "weren't": "were not", "what'd": "what did", "what'll": "what will",
    "what're": "what are", "what's": "what is", "whats": "what is",
    "what've": "what have", "when's": "when is", "when'd": "when did",
    "where'd": "where did", "where's": "where is", "where've": "where have",
    "who'd": "who would", "who'd've": "who would have", "who'll": "who will",
    "who're": "who are", "who's": "who is", "who've": "who have",
    "why'd": "why did", "why're": "why are", "why's": "why is",
    "won't": "will not", "won't've": "will not have",
    "would've": "would have", "wouldn't": "would not",
    "wouldn't've": "would not have", "y'all": "you all", "ya'll": "you all",
    "you'd": "you would", "you'd've": "you would have", "you'll": "you will",
    "y'aint": "you are not", "y'ain't": "you are not", "you're": "you are",
    "you've": "you have"}

_NUMBERS_DIGITS = {
    "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4",
    "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9",
    "ten": "10", "eleven": "11", "twelve": "12", "thirteen": "13",
    "fourteen": "14", "fifteen": "15", "sixteen": "16", "seventeen": "17",
    "eighteen": "18", "nineteen": "19", "twenty": "20"}


def replace_contractions_with_full_words_and_replace_numbers_with_digits(text=None, remove_articles=True):
    """Replace contractions with full words and number words with digits in
    *text*, optionally removing articles.

    Matching is per whitespace-separated word and case-sensitive, so
    punctuation attached to a word (e.g. "can't,") prevents a match, as in
    the original implementation.

    Args:
        text: the input string to translate.
        remove_articles: when True, drop "a", "an" and "the".

    Returns:
        The translated string with single spaces between words.
    """
    translated_words = []
    for word in text.split():
        if remove_articles and word in ("a", "an", "the"):
            continue
        # Contraction expansion first, then number-word conversion, matching
        # the original order of operations.
        word = _CONTRACTIONS_EXPANSIONS.get(word, word)
        word = _NUMBERS_DIGITS.get(word, word)
        translated_words.append(word)
    return " ".join(translated_words)
def _send_command_list ( self , commands ) :
"""Wrapper for Netmiko ' s send _ command method ( for list of commands .""" | output = ""
for command in commands :
output += self . device . send_command ( command , strip_prompt = False , strip_command = False )
return output |
def move_mouse_relative(self, x, y):
    """Move the mouse relative to its current position.

    :param x: the distance in pixels to move on the X axis.
    :param y: the distance in pixels to move on the Y axis.
    """
    # Thin ctypes wrapper around the libxdo C call; no return value checked.
    _libxdo.xdo_move_mouse_relative(self._xdo, x, y)
def _kbhit_unix() -> bool:
    """Under UNIX: is a keystroke available?

    Polls stdin with a zero timeout, so the call never blocks.
    """
    ready, _, _ = select.select([sys.stdin], [], [], 0)
    return bool(ready)
def plotFCM(data, channel_names, kind='histogram', ax=None, autolabel=True,
            xlabel_kwargs=None, ylabel_kwargs=None, colorbar=False,
            grid=False, **kwargs):
    """Plots the sample on the current axis.

    Follow with a call to matplotlib's show() in order to see the plot.

    Parameters
    ----------
    data : DataFrame
    {graph_plotFCM_pars}
    {common_plot_ax}

    Returns
    -------
    The output of the plot command used
    """
    # BUG FIX: xlabel_kwargs/ylabel_kwargs used mutable dict defaults ({})
    # and were mutated below via setdefault, leaking state across calls.
    if xlabel_kwargs is None:
        xlabel_kwargs = {}
    if ylabel_kwargs is None:
        ylabel_kwargs = {}
    if ax is None:  # identity test instead of '== None'
        ax = pl.gca()
    xlabel_kwargs.setdefault('size', 16)
    ylabel_kwargs.setdefault('size', 16)
    channel_names = to_list(channel_names)
    if len(channel_names) == 1:
        # 1D so histogram plot
        kwargs.setdefault('color', 'gray')
        kwargs.setdefault('histtype', 'stepfilled')
        kwargs.setdefault('bins', 200)  # Do not move above
        x = data[channel_names[0]].values
        if len(x) >= 1:
            if (len(x) == 1) and isinstance(kwargs['bins'], int):
                # Only needed for hist (not hist2d) due to hist function
                # doing excessive input checking
                warnings.warn("One of the data sets only has a single event. "
                              "This event won't be plotted unless the bin locations"
                              " are explicitly provided to the plotting function. ")
                return None
            plot_output = ax.hist(x, **kwargs)
        else:
            return None
    elif len(channel_names) == 2:
        x = data[channel_names[0]].values  # value of first channel
        y = data[channel_names[1]].values  # value of second channel
        if len(x) == 0:
            # Don't draw a plot if there's no data
            return None
        if kind == 'scatter':
            kwargs.setdefault('edgecolor', 'none')
            plot_output = ax.scatter(x, y, **kwargs)
        elif kind == 'histogram':
            kwargs.setdefault('bins', 200)  # Do not move above
            kwargs.setdefault('cmin', 1)
            kwargs.setdefault('cmap', pl.cm.copper)
            kwargs.setdefault('norm', matplotlib.colors.LogNorm())
            plot_output = ax.hist2d(x, y, **kwargs)
            mappable = plot_output[-1]
            if colorbar:
                pl.colorbar(mappable, ax=ax)
        else:
            raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
    else:
        raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
    pl.grid(grid)
    if autolabel:
        y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
        ax.set_xlabel(channel_names[0], **xlabel_kwargs)
        ax.set_ylabel(y_label_text, **ylabel_kwargs)
    return plot_output
def find_models(self, constructor, constraints=None, *, columns=None, order_by=None, limiting=None, table_name=None):
    """Specialization of DataAccess.find_all that returns models instead of cursor objects.

    Falls back to the constructor's own table_name when none is given.
    """
    resolved_table = table_name or constructor.table_name
    return self._find_models(constructor, resolved_table, constraints,
                             columns=columns, order_by=order_by, limiting=limiting)
def inner_rect(self):
    """The rectangular area inside the margin, border, and padding.

    Generally widgets should avoid drawing or placing sub-widgets outside
    this rectangle.
    """
    inset = self.margin + self._border_width + self.padding
    # A visible (non-blank) border consumes one extra pixel on every side.
    if not self.border_color.is_blank:
        inset += 1
    return Rect((inset, inset),
                (self.size[0] - 2 * inset, self.size[1] - 2 * inset))
def _UpdateAndMigrateUnmerged ( self , not_merged_stops , zone_map , merge_map , schedule ) :
"""Correct references in migrated unmerged stops and add to merged _ schedule .
For stops migrated from one of the input feeds to the output feed update the
parent _ station and zone _ id references to point to objects in the output
feed . Then add the migrated stop to the new schedule .
Args :
not _ merged _ stops : list of stops from one input feed that have not been
merged
zone _ map : map from zone _ id in the input feed to zone _ id in the output feed
merge _ map : map from Stop objects in the input feed to Stop objects in
the output feed
schedule : the input Schedule object""" | # for the unmerged stops , we use an already mapped zone _ id if possible
# if not , we generate a new one and add it to the map
for stop , migrated_stop in not_merged_stops :
if stop . zone_id in zone_map :
migrated_stop . zone_id = zone_map [ stop . zone_id ]
else :
migrated_stop . zone_id = self . feed_merger . GenerateId ( stop . zone_id )
zone_map [ stop . zone_id ] = migrated_stop . zone_id
if stop . parent_station :
parent_original = schedule . GetStop ( stop . parent_station )
migrated_stop . parent_station = merge_map [ parent_original ] . stop_id
self . feed_merger . merged_schedule . AddStopObject ( migrated_stop ) |
def fetch_digests(self, package_name: str, package_version: str) -> dict:
    """Fetch digests for the given package in specified version from the given package index.

    Indexes that do not carry the package are skipped (logged at debug
    level); the result maps each responding index URL to its hashes.
    """
    digests = {}
    for index_source in self._sources:
        try:
            digests[index_source.url] = index_source.get_package_hashes(package_name, package_version)
        except NotFound as exc:
            _LOGGER.debug(f"Package {package_name} in version {package_version} not "
                          f"found on index {index_source.name}: {str(exc)}")
    return digests
def set(self, oid, value, value_type=None):
    """Sets a single OID value. If you do not pass value_type hnmp will
    try to guess the correct type. Autodetection is supported for:

    * int and float (as Integer, fractional part will be discarded)
    * IPv4 address (as IpAddress)
    * str (as OctetString)

    Unfortunately, pysnmp does not support the SNMP FLOAT type so
    please use Integer instead.

    :param oid: the OID to write
    :param value: the value to store at the OID
    :param value_type: optional key into TYPES to force a specific type
    :return: the value read back after the set, converted to a native type
    :raises TypeError: the value's type cannot be autodetected
    :raises ValueError: value_type is not a supported type name
    :raises SNMPError: any engine/PDU/transport failure
    """
    snmpsecurity = self._get_snmp_security()
    if value_type is None:
        if isinstance(value, int):
            data = Integer(value)
        elif isinstance(value, float):
            # pysnmp has no FLOAT type; the fractional part is discarded.
            data = Integer(value)
        elif isinstance(value, str):
            if is_ipv4_address(value):
                data = IpAddress(value)
            else:
                data = OctetString(value)
        else:
            # BUG FIX: the original relied on adjacent-literal concatenation,
            # so '...keyword arg: ' was fused into the join() *separator*
            # ("...arg: , ".join(...)) and the message came out garbled.
            # Concatenate the joined key list explicitly instead.
            raise TypeError(
                "Unable to autodetect type. Please pass one of "
                "these strings as the value_type keyword arg: "
                + ", ".join(TYPES.keys())
            )
    else:
        if value_type not in TYPES:
            raise ValueError("'{}' is not one of the supported types: {}".format(
                value_type, ", ".join(TYPES.keys())))
        data = TYPES[value_type](value)
    try:
        engine_error, pdu_error, pdu_error_index, objects = self._cmdgen.setCmd(
            snmpsecurity,
            cmdgen.UdpTransportTarget((self.host, self.port),
                                      timeout=self.timeout, retries=self.retries),
            (oid, data),
        )
        if engine_error:
            raise SNMPError(engine_error)
        if pdu_error:
            raise SNMPError(pdu_error.prettyPrint())
    except Exception as e:
        # NOTE: this also re-wraps the SNMPErrors raised just above,
        # preserving the original behavior.
        raise SNMPError(e)
    _, value = objects[0]
    value = _convert_value_to_native(value)
    return value
def get_objective_search_session(self):
    """Gets the OsidSession associated with the objective search service.

    return: (osid.learning.ObjectiveSearchSession) - an
        ObjectiveSearchSession
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_objective_search() is false
    compliance: optional - This method must be implemented if
        supports_objective_search() is true.
    """
    if not self.supports_objective_search():
        raise Unimplemented()
    # Import lazily; a missing sessions module is an operational failure.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.ObjectiveSearchSession(runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def parse_range_header(header, maxlen=0):
    '''Yield (start, end) ranges parsed from a HTTP Range header. Skip
    unsatisfiable ranges. The end index is non-inclusive.'''
    if not header or not header.startswith('bytes='):
        return
    for spec in header[6:].split(','):
        if '-' not in spec:
            continue
        first, last = spec.split('-', 1)
        try:
            if not first:
                # bytes=-100 -> the final 100 bytes
                begin, stop = max(0, maxlen - int(last)), maxlen
            elif not last:
                # bytes=100- -> everything from offset 100 onwards
                begin, stop = int(first), maxlen
            else:
                # bytes=100-200 -> inclusive range, convert to half-open
                begin, stop = int(first), min(int(last) + 1, maxlen)
        except ValueError:
            continue  # non-numeric bound: skip this range
        if 0 <= begin < stop <= maxlen:
            yield begin, stop
def compute_asset_lifetimes(frames):
    """Parameters
    ----------
    frames : dict[str, pd.DataFrame]
        A dict mapping each OHLCV field to a dataframe with a row for
        each date and a column for each sid, as passed to write().

    Returns
    -------
    start_date_ixs : np.array[int64]
        The index of the first date with non-nan values, for each sid.
    end_date_ixs : np.array[int64]
        The index of the last date with non-nan values, for each sid.
    """
    # AND the per-field null masks together: an entry is True only where
    # *every* field is nan for that (day, sid) pair.
    null_masks = [frames[field].isnull().values for field in FIELDS]
    is_null_matrix = np.logical_and.reduce(null_masks)
    if not is_null_matrix.size:
        no_assets = np.array([], dtype='int64')
        return no_assets, no_assets.copy()
    # argmin on a boolean column returns the first False, i.e. the first
    # day with data.
    start_date_ixs = is_null_matrix.argmin(axis=0)
    # Same trick on the row-reversed matrix gives the distance of the
    # last day with data from the end; convert back to a forward index.
    offsets_from_end = is_null_matrix[::-1].argmin(axis=0)
    end_date_ixs = is_null_matrix.shape[0] - offsets_from_end - 1
    return start_date_ixs, end_date_ixs
def fetch_object(self, doc_id):
    """Fetch the document by its PK.

    Translates the ORM's DoesNotExist into ReferenceNotFoundError.
    """
    manager = self.object_class.objects
    try:
        return manager.get(pk=doc_id)
    except self.object_class.DoesNotExist:
        raise ReferenceNotFoundError
def reboot(self):
    """Reboots the device.

    Generally one should use this method to reboot the device instead of
    directly calling `adb.reboot`, because this method gracefully handles
    the teardown and restoration of running services.

    This method is blocking and only returns when the reboot has completed
    and the services restored.

    Raises:
        Error: Waiting for completion timed out.
    """
    if self.is_bootloader:
        # No services are running in bootloader mode, so a plain
        # fastboot reboot suffices.
        self.fastboot.reboot()
    else:
        # handle_reboot() tears services down and restores them once the
        # device is back up.
        with self.handle_reboot():
            self.adb.reboot()
def verify(self, **kwargs):
    """Checks this component for invalidating conditions.

    :returns: str -- message if error, 0 otherwise
    """
    # An explicitly supplied window duration must cover the stimulus.
    if 'duration' in kwargs and kwargs['duration'] < self._duration:
        return "Window size must equal or exceed stimulus length"
    if self._risefall > self._duration:
        return "Rise and fall times exceed component duration"
    return 0
def encode_certificate(result):
    """Encode cert bytes to PEM encoded cert file.

    Base64-encodes *result*, wraps it at 64 columns, frames it with the
    BEGIN/END CERTIFICATE markers, and writes it to <tempdir>/signed.crt.

    :param result: raw certificate bytes to encode.
    :return: True on success.
    """
    b64_body = "\n".join(textwrap.wrap(base64.b64encode(result).decode('utf8'), 64))
    cert_body = """-----BEGIN CERTIFICATE-----\n{0}\n-----END CERTIFICATE-----\n""".format(b64_body)
    # BUG FIX: use a context manager so the handle is closed even if
    # write() raises (the original open/write/close leaked on error).
    with open("{}/signed.crt".format(gettempdir()), "w") as signed_crt:
        signed_crt.write(cert_body)
    return True
def show(self):
    """Ensure the widget is shown.

    Calling this method will also set the widget visibility to True.
    """
    self.visible = True
    if not self.proxy_is_active:
        return
    self.proxy.ensure_visible()
def mask(scope, ips, mask):
    """Applies the given IP mask (e.g. 255.255.255.0) to the given IP address
    (or list of IP addresses) and returns it.

    :type ips: string
    :param ips: A prefix, or a list of IP prefixes.
    :type mask: string
    :param mask: An IP mask.
    :rtype: string
    :return: The network(s) that result(s) from applying the mask.
    """
    # NOTE(review): arguments appear to arrive wrapped in lists (template
    # scope-function convention, presumably), hence mask[0] -- confirm
    # against the callers.
    netmask = ipv4.ip2int(mask[0])
    networks = []
    for ip in ips:
        networks.append(ipv4.int2ip(ipv4.ip2int(ip) & netmask))
    return networks
def contains ( self , token : str ) -> bool :
"""Return if the token is in the list or not .""" | self . _validate_token ( token )
return token in self |
def delete_all_metadata ( self ) :
"""DELETE / : login / machines / : id / metadata
: Returns : current metadata
: rtype : empty : py : class : ` dict `
Deletes all the metadata stored for this machine . Also explicitly
requests and returns the machine metadata so that the local copy stays
synchronized .""" | j , r = self . datacenter . request ( 'DELETE' , self . path + '/metadata' )
r . raise_for_status ( )
return self . get_metadata ( ) |
def _write_proxy_conf(proxyfile):
    '''write to file'''
    log.trace('Salt Proxy Module: write proxy conf')
    if not proxyfile:
        # No path supplied: report the problem without raising.
        msg = 'Invalid value for proxy file provided!, Supplied value = {0}'.format(proxyfile)
    else:
        log.debug('Writing proxy conf file')
        with salt.utils.files.fopen(proxyfile, 'w') as proxy_conf:
            proxy_conf.write(salt.utils.stringutils.to_str(
                'master = {0}'.format(__grains__['master'])))
        msg = 'Wrote proxy file {0}'.format(proxyfile)
    log.debug(msg)
    return msg
def get_locations(self, filter_to_my_group=False):
    """Retrieve Locations listed in ChemInventory.

    :param filter_to_my_group: when True, only return locations belonging
        to this client's own group (``self.groupid``).
    :return: list of Location objects, each linked to its Group.
    """
    resp = self._post('general-retrievelocations', 'locations')
    # Build an id -> Group lookup from the group info in the response.
    groups = {}
    if resp['groupinfo']:
        for group in resp['groupinfo']:
            groups[group['id']] = Group(name=group.get('name'), inventory_id=group.get('id'))
    final_resp = []
    if resp['data']:
        if filter_to_my_group:
            # Keep only this client's own group entry.
            resp['data'] = {self.groupid: resp['data'][self.groupid]}
        for groupid, sublocation in resp['data'].items():
            # The API may return sublocations as a dict keyed by id or as a
            # (possibly nested) list; normalize both shapes to a flat list.
            if type(sublocation) is dict:
                sublocation = [loc for _, loc in sublocation.items()]
                sublocation = flatten_list(sublocation)
            if type(sublocation) is list:
                sublocation = flatten_list(sublocation)
            for location in sublocation:
                # NOTE(review): raises KeyError if a location references a
                # group absent from 'groupinfo' -- presumably the API never
                # does this; confirm against the API contract.
                group = groups[groupid]
                final_resp.append(Location(name=location.get('name'), inventory_id=location.get('id'), parent=location.get('parent'), group=group, barcode=location.get('barcode')))
    return final_resp
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.

    Sweeps ``lambda_bins`` values log-spaced from ``max_lambda`` down to
    ``min_lambda`` (largest first, so each solve warm-starts the next),
    scores each fit by AIC/AICc/BIC, and tracks the best model by BIC.

    :param min_lambda: smallest regularization value to try.
    :param max_lambda: largest regularization value to try.
    :param lambda_bins: number of points on the log-spaced lambda grid.
    :param verbose: print progress information when truthy.
    :return: dict with the score traces ('aic', 'aicc', 'bic', 'dof',
        'loglikelihood'), the 'beta' trace, the 'lambda' grid, the
        'best_idx'/'best' (lowest-BIC) fit, and its 'plateaus'.
    '''
    self.u = np.zeros(self.Dk.shape[0], dtype='double')
    # Log-spaced grid, largest lambda first for warm starting.
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape)  # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape)  # The AICc score for each lambda value (correcting for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape)  # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape)  # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None
    if self.edges is None:
        # Lazily build an adjacency map from the sparse difference matrix D.
        self.edges = defaultdict(list)
        elist = csr_matrix(self.D).indices.reshape((self.D.shape[0], 2))
        for n1, n2 in elist:
            self.edges[n1].append(n2)
            self.edges[n2].append(n1)
    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))
        # Fit to the final values
        beta = self.solve(lam)
        if verbose:
            print('Calculating degrees of freedom')
        # Count the number of free parameters in the grid (dof)
        # -- TODO: the graph trend filtering paper seems to imply we
        # shouldn't multiply by (k+1)?
        dof_vals = self.Dk_minus_one.dot(beta) if self.k > 0 else beta
        plateaus = calc_plateaus(dof_vals, self.edges, rel_tol=0.01) if (self.k % 2) == 0 else nearly_unique(dof_vals, rel_tol=0.03)
        dof_trace[i] = max(1, len(plateaus))  # * (k+1)
        if verbose:
            print('Calculating Information Criteria')
        # Get the negative log-likelihood
        log_likelihood_trace[i] = -0.5 * ((self.y - beta) ** 2).sum()
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i] + 1) / (len(beta) - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
        # Track the best model thus far
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}\n'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace, 'aicc': aicc_trace, 'bic': bic_trace, 'dof': dof_trace, 'loglikelihood': log_likelihood_trace, 'beta': np.array(beta_trace), 'lambda': lambda_grid, 'best_idx': best_idx, 'best': beta_trace[best_idx], 'plateaus': best_plateaus}
def _calc_percent_disk_stats ( self , stats ) :
"""Calculate a percentage of used disk space for data and metadata""" | mtypes = [ 'data' , 'metadata' ]
percs = { }
for mtype in mtypes :
used = stats . get ( 'docker.{0}.used' . format ( mtype ) )
total = stats . get ( 'docker.{0}.total' . format ( mtype ) )
free = stats . get ( 'docker.{0}.free' . format ( mtype ) )
if used and total and free and ceil ( total ) < free + used :
self . log . debug ( 'used, free, and total disk metrics may be wrong, ' 'used: %s, free: %s, total: %s' , used , free , total )
total = used + free
try :
if isinstance ( used , int ) :
percs [ 'docker.{0}.percent' . format ( mtype ) ] = round ( 100 * float ( used ) / float ( total ) , 2 )
elif isinstance ( free , int ) :
percs [ 'docker.{0}.percent' . format ( mtype ) ] = round ( 100 * ( 1.0 - ( float ( free ) / float ( total ) ) ) , 2 )
except ZeroDivisionError :
self . log . error ( 'docker.{0}.total is 0, calculating docker.{1}.percent' ' is not possible.' . format ( mtype , mtype ) )
return percs |
def create(cls, name, servers=None, time_range='yesterday', all_logs=False, filter_for_delete=None, comment=None, **kwargs):
    """Create a new delete log task. Provide True to all_logs to delete
    all log types. Otherwise provide kwargs to specify each log by
    type of interest.

    :param str name: name for this task
    :param servers: servers to back up. Servers must be instances of
        management servers or log servers. If no value is provided, all
        servers are backed up.
    :type servers: list(ManagementServer or LogServer)
    :param str time_range: specify a time range for the deletion. Valid
        options are 'yesterday', 'last_full_week_sun_sat',
        'last_full_week_mon_sun', 'last_full_month' (default 'yesterday')
    :param FilterExpression filter_for_delete: optional filter for deleting.
        (default: FilterExpression('Match All'))
    :param bool all_logs: if True, all log types will be deleted. If this
        is True, kwargs are ignored (default: False)
    :param kwargs: see :func:`~log_target_types` for keyword
        arguments and default values.
    :raises ElementNotFound: specified servers were not found
    :raises CreateElementFailed: failure to create the task
    :return: the task
    :rtype: DeleteLogTask
    """
    if servers:
        server_refs = [svr.href for svr in servers]
    else:
        # Default to every management and log server known to the SMC.
        server_refs = [svr.href for svr in ManagementServer.objects.all()]
        server_refs.extend(svr.href for svr in LogServer.objects.all())
    if filter_for_delete:
        delete_filter = filter_for_delete.href
    else:
        delete_filter = FilterExpression('Match All').href
    payload = {
        'name': name,
        'resources': server_refs,
        'time_limit_type': time_range,
        'start_time': 0,
        'end_time': 0,
        'file_format': 'unknown',
        'filter_for_delete': delete_filter,
        'comment': comment,
    }
    payload.update(**log_target_types(all_logs, **kwargs))
    return ElementCreator(cls, payload)
def recheck_limits(self):
    """Re-check that the cached limits are the current limits.

    Asks the control daemon for the latest limit data and, if the
    checksum differs from the cached one, rehydrates and swaps in the
    new limit set. On any unexpected failure the error is logged and
    published to the configured error set/channel and the previously
    cached limits remain in effect.
    """
    limit_data = self.control_daemon.get_limits()
    try:
        # Get the new checksum and list of limits
        new_sum, new_limits = limit_data.get_limits(self.limit_sum)
        # Convert the limits list into a list of objects
        lims = database.limits_hydrate(self.db, new_limits)
        # Save the new data
        self.limits = lims
        self.limit_map = dict((lim.uuid, lim) for lim in lims)
        self.limit_sum = new_sum
    except control.NoChangeException:
        # No changes to process; just keep going...
        return
    except Exception:
        # Log an error
        LOG.exception("Could not load limits")
        # Get our error set and publish channel
        control_args = self.conf['control']
        error_key = control_args.get('errors_key', 'errors')
        error_channel = control_args.get('errors_channel', 'errors')
        # Get an informative message
        msg = "Failed to load limits: " + traceback.format_exc()
        # Store the message into the error set.  We use a set here
        # because it's likely that more than one node will generate the
        # same message if there is an error, and this avoids an
        # explosion in the size of the set.
        with utils.ignore_except():
            self.db.sadd(error_key, msg)
        # Publish the message to a channel
        with utils.ignore_except():
            self.db.publish(error_channel, msg)
def _rook_legal_for_castle ( self , rook ) :
"""Decides if given rook exists , is of this color , and has not moved so it
is eligible to castle .
: type : rook : Rook
: rtype : bool""" | return rook is not None and type ( rook ) is Rook and rook . color == self . color and not rook . has_moved |
def to_dataframe(self, extra_edges_columns=[]):
    """Return this network in pandas DataFrame.

    :return: Network as DataFrame. This is equivalent to SIF.
    """
    network_json = self.session.get(self.__url).json()
    return df_util.to_dataframe(network_json, edges_attr_cols=extra_edges_columns)
def connect_predecessors(self, predecessors):
    """Connect all nodes in predecessors to this node."""
    # Link both directions: predecessor -> self and self <- predecessor.
    for predecessor in predecessors:
        self.ingoing.append(predecessor)
        predecessor.outgoing.append(self)
def clear_file_systems(self):
    """Remove references to build and source file systems, reverting to the defaults"""
    library_config = self.dataset.config.library
    # Drop both the cached handles and the persisted config URLs.
    self._source_url = None
    library_config.source.url = None
    self._source_fs = None
    self._build_url = None
    library_config.build.url = None
    self._build_fs = None
    self.dataset.commit()
def device_query_retrieve(self, query_id, **kwargs):  # noqa: E501
    """Retrieve a device query.  # noqa: E501

    Retrieve a specific device query.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_query_retrieve(query_id, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param str query_id: (required)
    :return: DeviceQuery
        If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the synchronous and asynchronous paths delegate to the same
    # *_with_http_info helper; asynchronous=True makes it return a thread.
    return self.device_query_retrieve_with_http_info(query_id, **kwargs)  # noqa: E501
def policy_parameters(self):
    """Parameters of policy"""
    # Lazily chain the backbone and action-head parameter iterators.
    backbone_params = self.policy_backbone.parameters()
    head_params = self.action_head.parameters()
    return it.chain(backbone_params, head_params)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.