signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def learnPlaceCode(self, runs, dir=1, periodic=False, recurrent=True,
                   randomSpeed=False, learnRecurrent=False):
    """Traverses a sinusoidal trajectory across the environment, learning during
    the process. A pair of runs across the environment (one in each direction)
    takes 10 seconds if in a periodic larger environment, and 4 seconds in a
    smaller nonperiodic environment.

    :param runs: How many runs across the environment to do. Each "run" is
            defined as a full sweep across the environment in each direction.
    :param dir: Which direction to move in first. Valid values are 1 and -1.
    :param periodic: Whether or not the learning environment should be
            periodic (toroidal).
    :param recurrent: Whether or not recurrent connections should be active
            during learning. Warning: True can lead to instability.
    :param randomSpeed: Whether or not to use a random maximum speed for each
            run, to better simulate real learning. Can degrade performance.
            Only supported in periodic environments.
    :param learnRecurrent: Whether or not to learn recurrent connections.
    """
    # Simulate for a second to get nice starting activation bumps.
    # Turn plotting off so as not to confuse the viewer
    self.plotting = False
    self.simulate(10, 1, 1, 0, envelope=False, inputNoise=None)
    self.plotting = True
    # Set up plotting
    if self.plotting:
        self.fig = plt.figure()
        self.ax1 = self.fig.add_subplot(411)
        self.ax2 = self.fig.add_subplot(412)
        self.ax3 = self.fig.add_subplot(413)
        self.ax4 = self.fig.add_subplot(414)
        plt.ion()
        plt.tight_layout()
        self.ax3.set_xlabel("Inhibitory-Inhibitory connections")
        self.fig.show()
        self.fig.canvas.draw()
    if self.movie:
        history = []
    # Tracks time since the last plot refresh; was previously only
    # initialized in the periodic branch, causing a NameError in the
    # nonperiodic one.
    residTime = 0
    # Set up the trajectories and running times.
    if not periodic:
        time = 4. * runs
        # ``times`` was referenced below but never defined (NameError in
        # the original); bind the sample grid once and reuse it.
        times = np.arange(0, time, self.dt)
        timings = [times]
        trajectories = [(np.sin(dir * (times * np.pi / 2 - np.pi / 2.)) + 1) / 2]
    else:
        # Space the starting points of the runs out. This tends to improve the
        # translation-invariance of the weight profiles, and thus gives better
        # overall path integration.
        startingPoint = 0
        trajectories = []
        timings = []
        time = 0
        for run in range(runs):
            if randomSpeed:
                speed = np.random.random() + 0.5
            else:
                speed = 1.
            length = 10. / speed
            runTimes = np.arange(0, length, self.dt)
            # ``trajectory`` was assigned by column without ever being
            # created (NameError in the original). Two columns are filled
            # below, so allocate an (N, 2) array — TODO confirm shape
            # against the rest of the model.
            trajectory = np.zeros((len(runTimes), 2))
            trajectory[:, 0] = (np.sin(dir * (runTimes * np.pi / (5 / speed) - np.pi / 2.)) + 1) * 2.5 + startingPoint
            trajectory[:, 1] = (np.sin(dir * (runTimes * np.pi / (5 / speed) - np.pi / 2.)) + 1) * 2.5
            trajectories.append(trajectory)
            timings.append(runTimes + time)
            time += length
            startingPoint += 1. / runs
    for trajectory, timing in zip(trajectories, timings):
        # Reset activations between runs so each sweep starts clean.
        self.activationsI = np.zeros(self.activationsI.shape)
        self.activationsER = np.zeros(self.activationsER.shape)
        self.activationsEL = np.zeros(self.activationsEL.shape)
        velocities = np.diff(trajectory) / self.dt
        for i, t in enumerate(timing[:-1]):
            x = trajectory[i] % 1
            v = velocities[i]
            # Gaussian place-code activation centered on the current position.
            self.activationsP = np.exp(-1. * (self.placeCode - x) ** 2 /
                                       (2 * self.sigmaLoc ** 2))
            self.update(0, 0, v, recurrent=recurrent,
                        envelope=(not periodic), iSpeedTuning=periodic,
                        enforceDale=True, )
            self.stdpUpdate(t, onlyPlace=not learnRecurrent)
            # Finally, enforce Dale's law. Place neurons must be excitatory.
            # Also keep the place weights from being too large.
            np.maximum(self.weightsPI, 0, self.weightsPI)
            np.minimum(self.weightsPI, 3., self.weightsPI)
            for k, w in self.weightsPE.items():
                np.maximum(w, 0, w)
                np.minimum(w, 3., w)
            residTime += self.dt
            if residTime > PLOT_INTERVAL:
                residTime -= PLOT_INTERVAL
                if self.plotting:
                    self.ax4.matshow(self.weightsPI, cmap=plt.cm.coolwarm)
                    self.plotActivation(position=x, time=t)
                if self.movie:
                    history.append(np.copy(self.weightsPI))
    if self.movie:
        self.createMovie(np.stack(history, -1), "PIWeightEvolution",
                         self.numInhibitory, self.numPlaces)
    # Flush the STDP buffer once learning is complete.
    self.stdpUpdate(t, onlyPlace=not learnRecurrent, clearBuffer=True)
    # Enforce Dale's law
    np.minimum(self.weightsII, 0, self.weightsII)
    np.maximum(self.weightsPI, 0, self.weightsPI)
    for k, w in self.weightsIE.items():
        np.minimum(w, 0, w)
    for k, w in self.weightsEI.items():
        np.maximum(w, 0, w)
    for k, w in self.weightsPE.items():
        np.maximum(w, 0, w)
|
def depth(self) -> int:
    """Depth of the citation scheme.

    .. example:: If we have a Book, Poem, Line system, and the citation we are looking at is Poem, depth is 1

    :rtype: int
    :return: Depth of the citation scheme
    """
    # A leaf counts as depth 1; otherwise add one level to the deepest child.
    if self.children:
        return 1 + max(child.depth for child in self.children)
    return 1
|
def update_rtfilters(self):
    """Updates RT filters for each peer.

    Should be called if a new RT Nlri's have changed based on the setting.
    Currently only used by `Processor` to update the RT filters after it
    has processed a RT destination. If RT filter has changed for a peer we
    call RT filter change handler.
    """
    # Recompute the full peer -> RT-filter mapping.
    # TODO(PH): Check if getting this map can be optimized (if expensive)
    updated_map = self._compute_rtfilter_map()
    # Diff old vs. new interested RTs per peer; notify the handler on change.
    for peer in self._peer_manager.iterpeers:
        previous = self._rt_mgr.peer_to_rtfilter_map.get(peer, set())
        current = updated_map.get(peer, set())
        removed_rts = previous - current
        added_rts = current - previous
        if added_rts or removed_rts:
            LOG.debug('RT Filter for peer %s updated: '
                      'Added RTs %s, Removed Rts %s',
                      peer.ip_address, added_rts, removed_rts)
            self._on_update_rt_filter(peer, added_rts, removed_rts)
    # Install the freshly computed filters.
    self._peer_manager.set_peer_to_rtfilter_map(updated_map)
    self._rt_mgr.peer_to_rtfilter_map = updated_map
    LOG.debug('Updated RT filters: %s', self._rt_mgr.peer_to_rtfilter_map)
    # Update interested RTs i.e. RTs on the path that will be installed
    # into global tables
    self._rt_mgr.update_interested_rts()
|
def collect_nsarg_norms(self):
    """Adds canonical and decanonical values to NSArgs in AST.

    This prepares the AST object for (de)canonicalization.
    """
    started = datetime.datetime.now()
    self.ast = bel_utils.populate_ast_nsarg_defaults(self.ast, self.ast)
    self.ast.collected_nsarg_norms = True
    # Mark a nested BELAst object as processed as well, when present.
    bel_obj = getattr(self.ast, "bel_object", None)
    if bel_obj and bel_obj.type == "BELAst":
        bel_obj.collected_nsarg_norms = True
    finished = datetime.datetime.now()
    delta_ms = f"{(finished - started).total_seconds() * 1000:.1f}"
    log.info("Timing - prepare nsarg normalization", delta_ms=delta_ms)
    return self
|
def download_file(url, file_name):
    """Helper for downloading a remote file to disk.

    :param url: URL to fetch.
    :param file_name: local path to write to. If the file already exists,
        nothing is downloaded and 0 is returned.
    :return: number of bytes written (0 if the file already existed).
    :raises Exception: if the HTTP response indicates failure.
    """
    logger.info("Downloading URL: %s", url)
    file_size = 0
    if not os.path.isfile(file_name):
        response = requests.get(url, stream=True)
        # Check the response BEFORE opening the local file; the original
        # opened the file first, leaving an empty file behind on failure.
        if not response.ok:
            raise Exception("Download exception. Will fail.")
        with open(file_name, "wb") as fp:
            for block in response.iter_content(1024):
                if not block:
                    break
                fp.write(block)
                file_size += len(block)
    logger.info("Download finished, size is %d bytes.", file_size)
    return file_size
|
def maxval_pos(self):
    """The ``(y, x)`` coordinate of the maximum pixel value of the
    ``data`` within the source segment.

    If there are multiple occurrences of the maximum value, only the
    first occurence is returned.
    """
    # A fully-masked segment has no defined maximum position.
    if self._is_completely_masked:
        return (np.nan, np.nan) * u.pix
    else:
        # Position of the maximum within the cutout frame.
        yp, xp = self.maxval_cutout_pos.value
        # Shift by the cutout's slice origin to express the position in
        # full-image coordinates (as a pixel Quantity — presumably ``u``
        # is astropy.units; confirm).
        return (yp + self._slice[0].start, xp + self._slice[1].start) * u.pix
|
def get_objective_bank_nodes(self, objective_bank_id=None, ancestor_levels=None, descendant_levels=None, include_siblings=None):
    """Gets a portion of the hierarchy for the given objective bank.

    arg:    includeSiblings (boolean): true to include the siblings
            of the given node, false to omit the siblings
    return: (osid.learning.ObjectiveBankNode) - an objective bank
            node
    raise:  NotFound - objectiveBankId not found
    raise:  NullArgument - objectiveBankId is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    # Only the descendant depth influences the URL here;
    # ancestor_levels and include_siblings are accepted but unused.
    url_kwargs = {'alias': objective_bank_id}
    if descendant_levels:
        url_kwargs['depth'] = descendant_levels
    return self._get_request(self._urls.nodes(**url_kwargs))
|
def val(self):
    """The ``<c:val>`` XML for this series, as an oxml element."""
    # Fill the template from the series' value reference, formatting,
    # point count, and per-point XML, then parse it into an element.
    tmpl_args = {
        'nsdecls': ' %s' % nsdecls('c'),
        'values_ref': self._series.values_ref,
        'number_format': self._series.number_format,
        'val_count': len(self._series),
        'val_pt_xml': self._val_pt_xml,
    }
    return parse_xml(self._val_tmpl.format(**tmpl_args))
|
def get_api_service(self, name=None):
    """Returns the specific service config definition.

    :param name: name of the API service to look up.
    :return: the service configuration registered under ``name``.
    :raises Exception: if no service configuration exists for ``name``.
    """
    # The original wrapped this in a bare ``except`` that caught its own
    # ValueError and re-raised a generic Exception; the dict lookup itself
    # cannot fail, so raise the (observable) Exception directly.
    svc = self.services_by_name.get(name, None)
    if svc is None:
        raise Exception("Failed to retrieve the API service configuration")
    return svc
|
def get_exps(self, path='.'):
    """go through all subdirectories starting at path and return the experiment
    identifiers (= directory names) of all existing experiments. A directory
    is considered an experiment if it contains a experiment.cfg file.
    """
    found = []
    for dirpath, _dirnames, filenames in os.walk(path):
        if 'experiment.cfg' not in filenames:
            continue
        # Only count this directory when none of its immediate
        # subdirectories contains an experiment itself.
        children = [os.path.join(dirpath, entry)
                    for entry in os.listdir(dirpath)
                    if os.path.isdir(os.path.join(dirpath, entry))]
        if not any(self.get_exps(child) for child in children):
            found.append(dirpath)
    return found
|
def fft(arr_obj, res_g=None, inplace=False, inverse=False, axes=None, plan=None, fast_math=True):
    """(inverse) fourier trafo of 1-3D arrays

    creates a new plan or uses the given plan

    the transformed arr_obj should be either a

    - numpy array:
        returns the fft as numpy array (inplace is ignored)
    - OCLArray of type complex64:
        writes transform into res_g if given, to arr_obj if inplace
        or returns a new OCLArray with the transform otherwise
    """
    # Build a plan lazily when the caller did not supply one.
    if plan is None:
        plan = fft_plan(arr_obj.shape, arr_obj.dtype, axes=axes, fast_math=fast_math)
    if isinstance(arr_obj, np.ndarray):
        # Host (numpy) input: res_g and inplace are ignored.
        return _ocl_fft_numpy(plan, arr_obj, inverse=inverse)
    elif isinstance(arr_obj, OCLArray):
        # GPU path requires single-precision complex data.
        if not arr_obj.dtype.type is np.complex64:
            raise TypeError("OCLArray arr_obj has to be of complex64 type")
        if inplace:
            _ocl_fft_gpu_inplace(plan, arr_obj, inverse=inverse, )
        else:  # FIXME
            # NOTE(review): this raise makes the return below unreachable;
            # out-of-place GPU fft appears intentionally disabled — confirm
            # against upstream before relying on res_g.
            raise NotImplementedError("currently only inplace fft is supported (FIXME)")
            return _ocl_fft_gpu(plan, arr_obj, res_arr=res_g, inverse=inverse, )
    else:
        raise TypeError("array argument (1) has bad type: %s" % type(arr_obj))
|
def add_error(self, group, term, sub_term, value):
    """For records that are not defined as terms, either add it to the
    errors list.
    """
    # Errors are keyed by the full (group, term, sub_term) triple.
    key = (group, term, sub_term)
    self._errors[key] = value
|
def gen_date_by_year(year):
    """Return a random date string within the given year.

    :param year: four-character year string (e.g. "2010"); a 4-digit int
        is also accepted and converted.
    :return: a random valid date within the year, formatted %Y%m%d,
        e.g. "20100505".
    :raises ValueError: if the year is not exactly four digits.
    """
    if isinstance(year, int):
        if len(str(year)) != 4:
            raise ValueError("year should be int year like 2018, but we got {}, {}".format(year, type(year)))
        year = str(year)
    elif isinstance(year, str) and len(year) != 4:
        raise ValueError("year should be string year like '2018', but we got {}, {}".format(year, type(year)))
    first_day, last_day = year + "-01-01", year + "-12-31"
    return GetRandomTime.gen_date_by_range(first_day, last_day, "%Y%m%d")
|
def threat(self, name, owner=None, **kwargs):
    """Create the Threat TI object.

    Args:
        name: name of the threat.
        owner: owner of the threat (optional).
        **kwargs: additional keyword arguments forwarded to ``Threat``.

    Return:
        A ``Threat`` instance bound to this object's ``tcex``.
    """
    return Threat(self.tcex, name, owner=owner, **kwargs)
|
def map_values(f, D):
    '''Map each value in the dictionary D to f(value).

    :param f: one-argument callable applied to each value.
    :param D: input dictionary (not modified).
    :return: a new dict with the same keys and transformed values.
    '''
    # ``iteritems`` is Python 2-only; ``items`` behaves identically here
    # and also works on Python 3.
    return {key: f(val) for key, val in D.items()}
|
def delete(self, table):
    """Deletes record in table

    >>> yql.delete('yql.storage').where(['name', '=', 'store://YEl70PraLLMSMuYAauqNc7'])
    """
    # Fluent builder: record the target table, reset any limit, seed the
    # query string, and return self so .where() can be chained.
    self._table = table
    self._limit = None
    self._query = "DELETE FROM {0}".format(table)
    return self
|
def main():
    """Main"""
    lic = ('License :: OSI Approved :: GNU Affero '
           'General Public License v3 or later (AGPLv3+)')
    version = load_source("version", os.path.join("spamc", "version.py"))
    # Trove classifiers advertised on PyPI.
    classifiers = [
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Intended Audience :: Developers',
        lic,
        'Natural Language :: English',
        'Operating System :: OS Independent',
    ]
    setup(
        name="spamc",
        version=version.__version__,
        description="Python spamassassin spamc client library",
        long_description=get_readme(),
        keywords="spam spamc spamassassin",
        author="Andrew Colin Kissa",
        author_email="andrew@topdog.za.net",
        url="https://github.com/akissa/spamc",
        license="AGPLv3+",
        packages=find_packages(exclude=['tests']),
        include_package_data=True,
        zip_safe=False,
        tests_require=TESTS_REQUIRE,
        install_requires=INSTALL_REQUIRES,
        classifiers=classifiers,
    )
|
def col_to_numeric(df, col_name, dest=False):
    """Coerces a column in a DataFrame to numeric.

    Parameters:
        df - DataFrame
            DataFrame to operate on
        col_name - string
            Name of column to coerce
        dest - bool, default False
            Whether to apply the result to the DataFrame or return it.
            True is apply, False is return.
    """
    # Unparseable values become NaN rather than raising.
    coerced = _pd.to_numeric(df[col_name], errors='coerce')
    if not dest:
        return coerced
    set_col(df, col_name, coerced)
|
def _makemask ( self , dest , res , pos ) :
"""Create a bitmask .
The value being masked is of width ` res ` .
Limb number ` pos ` of ` dest ` is being assigned to ."""
|
if ( res is None or dest . bitwidth < res ) and 0 < ( dest . bitwidth - 64 * pos ) < 64 :
return '&0x{:X}' . format ( ( 1 << ( dest . bitwidth % 64 ) ) - 1 )
return ''
|
def setColumnType(self, columnType):
    """Sets the column type for this widget to the inputed type value.

    This will reset the widget to use one of the plugins for editing the
    value of the column.

    :param columnType | <orb.ColumnType>
    :return: False when the type is already current, True after a rebuild.
    """
    # No-op when the requested type is already set.
    if columnType == self._columnType:
        return False
    self._columnType = columnType
    self.rebuild()
    return True
|
def get_angle_difference(v1, v2):
    """returns angular difference in degrees between two vectors. takes in cartesian coordinates.

    :param v1: first vector (sequence of numbers).
    :param v2: second vector (sequence of numbers).
    :return: angle between the vectors in degrees (0-180).
    """
    v1 = numpy.array(v1)
    v2 = numpy.array(v2)
    # ``old_div`` (Py2 compat from ``past.utils``) is unnecessary: the
    # denominator is always a float, so true division is identical.
    cos_angle = numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))
    # Clip to arccos's domain so float rounding on (anti)parallel vectors
    # cannot produce NaN (the original could).
    angle = numpy.arccos(numpy.clip(cos_angle, -1.0, 1.0))
    return math.degrees(angle)
|
def guess_extension(amimetype, normalize=False):
    """Tries to guess extension for a mimetype.

    @param amimetype: name of a mimetype
    @time amimetype: string
    @return: the extension
    @rtype: string
    """
    ext = _mimes.guess_extension(amimetype)
    if not ext:
        return ext
    if normalize:
        # Normalize some common magic mis-interpreation
        fixups = {'.asc': '.txt', '.obj': '.bin'}
        ext = fixups.get(ext, ext)
        from invenio.legacy.bibdocfile.api_normalizer import normalize_format
        return normalize_format(ext)
    return ext
|
def update_from_element(self, elem):
    """Reset this `Resource` instance to represent the values in
    the given XML element.
    """
    self._elem = elem
    # Drop any cached attribute values so they are re-derived from the
    # new element on next access.
    for cached in self.attributes:
        try:
            delattr(self, cached)
        except AttributeError:
            pass
    # Adopt the element's href as this resource's URL when present.
    href = elem.attrib.get('href')
    if href is not None:
        self._url = href
    return self
|
def include_flags(self, arch):
    '''Returns a string with the include folders'''
    # Root of the arch-specific OpenSSL include tree.
    include_root = join(self.get_build_dir(arch.arch), 'include')
    folders = [include_root,
               join(include_root, 'internal'),
               join(include_root, 'openssl')]
    return ''.join(' -I' + folder for folder in folders)
|
def setProfiles(self, profiles):
    """Sets a list of profiles to be the options for the manager.

    :param profiles | [<XViewProfile>, ..]
    """
    # Suppress change notifications while the combo is repopulated.
    self.blockSignals(True)
    self._profileCombo.blockSignals(True)
    self._profiles = profiles[:]  # keep a defensive copy
    self._profileCombo.clear()
    self._profileCombo.addItems(map(lambda prof: prof.name(), profiles))
    self._profileCombo.blockSignals(False)
    self.blockSignals(False)
|
def keys(self):
    """Keys

    Returns a list of the node names in the parent

    Returns:
        list
    """
    node_names = self._nodes.keys()
    # Python 2 mappings (which expose ``iterkeys``) already return a list
    # from ``keys()``; elsewhere, materialize the view as a tuple.
    if hasattr(self._nodes, 'iterkeys'):
        return node_names
    return tuple(node_names)
|
def request(method, url, **kwargs):
    """Wrapper for the `requests.request()` function.

    It accepts the same arguments as the original, plus an optional `retries`
    that overrides the default retry mechanism.
    """
    # Pull our extra keyword out before delegating; everything else is
    # passed through unchanged.
    retries = kwargs.pop('retries', None)
    # A fresh session per call scopes the retry configuration to this
    # request and is cleaned up on exit via the context manager.
    with Session(retries=retries) as session:
        return session.request(method=method, url=url, **kwargs)
|
def log(self, metric):
    """Format and output metric.

    Args:
        metric (dict): Complete metric.
    """
    message = self.LOGFMT.format(**metric)
    # Append the context suffix only when a context value is present.
    context = metric['context']
    if context:
        message += ' context: {context}'.format(context=context)
    self._logger.log(self.level, message)
|
def main():
    """Initialize the modem, print modem information, then wait for a call."""
    # The docstring was previously a no-op string statement in the middle
    # of the body; it now documents the function properly.
    global modem
    modem = bm(port='/dev/ttyACM0', incomingcallback=callback)
    if modem.state == modem.STATE_FAILED:
        print('Unable to initialize modem, exiting.')
        return
    # Print modem information (ATI3 queries the modem for its banner).
    resp = modem.sendcmd('ATI3')
    for line in resp:
        if line:
            print(line)
    try:
        input('Wait for call, press enter to exit')
    except (SyntaxError, EOFError, KeyboardInterrupt):
        pass
    modem.close()
|
def _calc_rms(mol1, mol2, clabel1, clabel2):
    """Calculate the RMSD.

    Args:
        mol1: The first molecule. OpenBabel OBMol or pymatgen Molecule
            object
        mol2: The second molecule. OpenBabel OBMol or pymatgen Molecule
            object
        clabel1: The atom indices that can reorder the first molecule to
            uniform atom order
        clabel1: The atom indices that can reorder the second molecule to
            uniform atom order

    Returns:
        The RMSD.
    """
    def reordered_copy(source, labels):
        # Build a bare OBMol whose atoms follow the given index order,
        # copying only atomic number and coordinates.
        copy = ob.OBMol()
        for idx in labels:
            src_atom = source.GetAtom(idx)
            new_atom = copy.NewAtom()
            new_atom.SetAtomicNum(src_atom.GetAtomicNum())
            new_atom.SetVector(src_atom.GetVector())
        return copy

    cmol1 = reordered_copy(BabelMolAdaptor(mol1).openbabel_mol, clabel1)
    cmol2 = reordered_copy(BabelMolAdaptor(mol2).openbabel_mol, clabel2)
    # Same OBAlign(True, False) flags as the original call — presumably
    # (includeH, symmetry); confirm against the OpenBabel API.
    aligner = ob.OBAlign(True, False)
    aligner.SetRefMol(cmol1)
    aligner.SetTargetMol(cmol2)
    aligner.Align()
    return aligner.GetRMSD()
|
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None, summary=False, reference_path=None):
    """Runs translation on test dataset.

    :param calc_bleu: if True compares results with reference and computes
        BLEU score
    :param epoch: index of the current epoch
    :param iteration: index of the current iteration
    :param eval_path: path to the file for saving results
    :param summary: if True prints summary
    :param reference_path: path to the file with reference translation
    """
    # Tensors used so results can be broadcast to other ranks when
    # running distributed.
    if self.cuda:
        test_bleu = torch.cuda.FloatTensor([0])
        break_training = torch.cuda.LongTensor([0])
    else:
        test_bleu = torch.FloatTensor([0])
        break_training = torch.LongTensor([0])
    if eval_path is None:
        eval_path = self.build_eval_path(epoch, iteration)
    detok_eval_path = eval_path + '.detok'
    # Remove stale output files from a previous run, if any.
    with contextlib.suppress(FileNotFoundError):
        os.remove(eval_path)
        os.remove(detok_eval_path)
    rank = get_rank()
    logging.info(f'Running evaluation on test set')
    self.model.eval()
    torch.cuda.empty_cache()
    # Generate translations, trim padding added by the loader, and
    # restore the dataset's original order.
    output = self.evaluate(epoch, iteration, summary)
    output = output[:len(self.loader.dataset)]
    output = self.loader.dataset.unsort(output)
    # Only rank 0 writes results and scores them.
    if rank == 0:
        with open(eval_path, 'a') as eval_file:
            eval_file.writelines(output)
        if calc_bleu:
            self.run_detokenizer(eval_path)
            test_bleu[0] = self.run_sacrebleu(detok_eval_path, reference_path)
            if summary:
                logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
            # Signal early stopping once the target BLEU is reached.
            if self.target_bleu and test_bleu[0] >= self.target_bleu:
                logging.info(f'Target accuracy reached')
                break_training[0] = 1
    barrier()
    torch.cuda.empty_cache()
    logging.info(f'Finished evaluation on test set')
    # Share rank 0's score and stop signal with all workers.
    if self.distributed:
        dist.broadcast(break_training, 0)
        dist.broadcast(test_bleu, 0)
    return test_bleu[0].item(), break_training[0].item()
|
def run_map(self, dmap):
    '''Execute the contents of the VM map

    :param dmap: the parsed cloud map, with 'create', optional 'existing'
        and optional 'destroy' sections.
    :return: dict of per-VM results keyed by VM name.
    :raises SaltCloudException: if the map contains a dependency loop.
    '''
    if self._has_loop(dmap):
        msg = 'Uh-oh, that cloud map has a dependency loop!'
        log.error(msg)
        raise SaltCloudException(msg)
    # Go through the create list and calc dependencies
    for key, val in six.iteritems(dmap['create']):
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['create'][key]['level'] = level
    try:
        existing_list = six.iteritems(dmap['existing'])
    except KeyError:
        existing_list = six.iteritems({})
    for key, val in existing_list:
        log.info('Calculating dependencies for %s', key)
        level = 0
        level = self._calcdep(dmap, key, val, level)
        log.debug('Got execution order %s for %s', level, key)
        dmap['existing'][key]['level'] = level
    # Now sort the create list based on dependencies
    create_list = sorted(six.iteritems(dmap['create']), key=lambda x: x[1]['level'])
    full_map = dmap['create'].copy()
    if 'existing' in dmap:
        full_map.update(dmap['existing'])
    possible_master_list = sorted(six.iteritems(full_map), key=lambda x: x[1]['level'])
    output = {}
    if self.opts['parallel']:
        parallel_data = []
    master_name = None
    master_minion_name = None
    master_host = None
    master_finger = None
    # The last profile flagged make_master wins.
    for name, profile in possible_master_list:
        if profile.get('make_master', False) is True:
            master_name = name
            master_profile = profile
    if master_name:
        # If the master already exists, get the host
        if master_name not in dmap['create']:
            master_host = self.client.query()
            for provider_part in master_profile['provider'].split(':'):
                master_host = master_host[provider_part]
            master_host = master_host[master_name][master_profile.get('ssh_interface', 'public_ips')]
            if not master_host:
                raise SaltCloudSystemExit('Could not get the hostname of master {}.'.format(master_name))
        # Otherwise, deploy it as a new master
        else:
            master_minion_name = master_name
            log.debug('Creating new master \'%s\'', master_name)
            if salt.config.get_cloud_config_value('deploy', master_profile, self.opts) is False:
                raise SaltCloudSystemExit('Cannot proceed with \'make_master\' when salt deployment ' 'is disabled(ex: --no-deploy).')
            # Generate the master keys
            log.debug('Generating master keys for \'%s\'', master_profile['name'])
            priv, pub = salt.utils.cloud.gen_keys(salt.config.get_cloud_config_value('keysize', master_profile, self.opts))
            master_profile['master_pub'] = pub
            master_profile['master_pem'] = priv
            # Generate the fingerprint of the master pubkey in order to
            # mitigate man-in-the-middle attacks
            master_temp_pub = salt.utils.files.mkstemp()
            with salt.utils.files.fopen(master_temp_pub, 'w') as mtp:
                mtp.write(pub)
            master_finger = salt.utils.crypt.pem_finger(master_temp_pub, sum_type=self.opts['hash_type'])
            os.unlink(master_temp_pub)
            if master_profile.get('make_minion', True) is True:
                master_profile.setdefault('minion', {})
                if 'id' in master_profile['minion']:
                    master_minion_name = master_profile['minion']['id']
                # Set this minion's master as local if the user has not set it
                if 'master' not in master_profile['minion']:
                    master_profile['minion']['master'] = '127.0.0.1'
                    if master_finger is not None:
                        master_profile['master_finger'] = master_finger
            # Generate the minion keys to pre-seed the master:
            for name, profile in create_list:
                make_minion = salt.config.get_cloud_config_value('make_minion', profile, self.opts, default=True)
                if make_minion is False:
                    continue
                log.debug('Generating minion keys for \'%s\'', profile['name'])
                priv, pub = salt.utils.cloud.gen_keys(salt.config.get_cloud_config_value('keysize', profile, self.opts))
                profile['pub_key'] = pub
                profile['priv_key'] = priv
                # Store the minion's public key in order to be pre-seeded in
                # the master
                master_profile.setdefault('preseed_minion_keys', {})
                master_profile['preseed_minion_keys'].update({name: pub})
            local_master = False
            if master_profile['minion'].get('local_master', False) and master_profile['minion'].get('master', None) is not None:
                # The minion is explicitly defining a master and it's
                # explicitly saying it's the local one
                local_master = True
            out = self.create(master_profile, local_master=local_master)
            if not isinstance(out, dict):
                log.debug('Master creation details is not a dictionary: %s', out)
            elif 'Errors' in out:
                raise SaltCloudSystemExit('An error occurred while creating the master, not ' 'continuing: {0}'.format(out['Errors']))
            deploy_kwargs = (self.opts.get('show_deploy_args', False) is True and
                             # Get the needed data
                             out.get('deploy_kwargs', {}) or
                             # Strip the deploy_kwargs from the returned data since we don't
                             # want it shown in the console.
                             out.pop('deploy_kwargs', {}))
            master_host = deploy_kwargs.get('salt_host', deploy_kwargs.get('host', None))
            if master_host is None:
                raise SaltCloudSystemExit('Host for new master {0} was not found, ' 'aborting map'.format(master_name))
            output[master_name] = out
    else:
        log.debug('No make_master found in map')
        # Local master?
        # Generate the fingerprint of the master pubkey in order to
        # mitigate man-in-the-middle attacks
        master_pub = os.path.join(self.opts['pki_dir'], 'master.pub')
        if os.path.isfile(master_pub):
            master_finger = salt.utils.crypt.pem_finger(master_pub, sum_type=self.opts['hash_type'])
    opts = self.opts.copy()
    if self.opts['parallel']:
        # Force display_ssh_output to be False since the console will
        # need to be reset afterwards
        log.info('Since parallel deployment is in use, ssh console output ' 'is disabled. All ssh output will be logged though')
        opts['display_ssh_output'] = False
    local_master = master_name is None
    for name, profile in create_list:
        if name in (master_name, master_minion_name):
            # Already deployed, it's the master's minion
            continue
        if 'minion' in profile and profile['minion'].get('local_master', False) and profile['minion'].get('master', None) is not None:
            # The minion is explicitly defining a master and it's
            # explicitly saying it's the local one
            local_master = True
        if master_finger is not None and local_master is False:
            profile['master_finger'] = master_finger
        if master_host is not None:
            profile.setdefault('minion', {})
            profile['minion'].setdefault('master', master_host)
        if self.opts['parallel']:
            parallel_data.append({'opts': opts, 'name': name, 'profile': profile, 'local_master': local_master})
            continue
        # Not deploying in parallel
        try:
            output[name] = self.create(profile, local_master=local_master)
            if self.opts.get('show_deploy_args', False) is False and 'deploy_kwargs' in output and isinstance(output[name], dict):
                output[name].pop('deploy_kwargs', None)
        except SaltCloudException as exc:
            log.error('Failed to deploy \'%s\'. Error: %s', name, exc, exc_info_on_loglevel=logging.DEBUG)
            output[name] = {'Error': str(exc)}
    for name in dmap.get('destroy', ()):
        output[name] = self.destroy(name)
    if self.opts['parallel'] and parallel_data:
        if 'pool_size' in self.opts:
            pool_size = self.opts['pool_size']
        else:
            pool_size = len(parallel_data)
        log.info('Cloud pool size: %s', pool_size)
        output_multip = enter_mainloop(_create_multiprocessing, parallel_data, pool_size=pool_size)
        # We have deployed in parallel, now do start action in
        # correct order based on dependencies.
        if self.opts['start_action']:
            actionlist = []
            grp = -1
            for key, val in groupby(six.itervalues(dmap['create']), lambda x: x['level']):
                actionlist.append([])
                grp += 1
                for item in val:
                    actionlist[grp].append(item['name'])
            out = {}
            for group in actionlist:
                log.info('Running %s on %s', self.opts['start_action'], ', '.join(group))
                client = salt.client.get_local_client()
                out.update(client.cmd(','.join(group), self.opts['start_action'], timeout=self.opts['timeout'] * 60, tgt_type='list'))
            for obj in output_multip:
                next(six.itervalues(obj))['ret'] = out[next(six.iterkeys(obj))]
                output.update(obj)
        else:
            for obj in output_multip:
                output.update(obj)
    return output
|
def present(name, auth=None, **kwargs):
    '''Ensure a security group rule exists

    defaults: port_range_min=None, port_range_max=None, protocol=None,
    remote_ip_prefix=None, remote_group_id=None, direction='ingress',
    ethertype='IPv4', project_id=None

    name
        Name of the security group to associate with this rule

    project_name
        Name of the project associated with the security group

    protocol
        The protocol that is matched by the security group rule.
        Valid values are None, tcp, udp, and icmp.
    '''
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    __salt__['neutronng.setup_clouds'](auth)
    # The API expects project_id; translate the friendlier project_name.
    if 'project_name' in kwargs:
        kwargs['project_id'] = kwargs['project_name']
        del kwargs['project_name']
    project = __salt__['keystoneng.project_get'](name=kwargs['project_id'])
    if project is None:
        ret['result'] = False
        ret['comment'] = "Project does not exist"
        return ret
    secgroup = __salt__['neutronng.security_group_get'](name=name, filters={'tenant_id': project.id})
    if secgroup is None:
        ret['result'] = False
        # BUG FIX: the original had a stray trailing comma here
        # (``ret['changes'] = {},``), which set 'changes' to the
        # one-element tuple ``({},)`` instead of an empty dict.
        ret['changes'] = {}
        ret['comment'] = 'Security Group does not exist {}'.format(name)
        return ret
    # we have to search through all secgroup rules for a possible match
    rule_exists = None
    for rule in secgroup['security_group_rules']:
        if _rule_compare(rule, kwargs) is True:
            rule_exists = True
    if rule_exists is None:
        if __opts__['test'] is True:
            ret['result'] = None
            ret['changes'] = kwargs
            ret['comment'] = 'Security Group rule will be created.'
            return ret
        # The variable differences are a little clumsy right now
        kwargs['secgroup_name_or_id'] = secgroup
        new_rule = __salt__['neutronng.security_group_rule_create'](**kwargs)
        ret['changes'] = new_rule
        ret['comment'] = 'Created security group rule'
        return ret
    return ret
|
async def ehlo(self, from_host=None):
    """Send the SMTP ``EHLO`` command, identifying the client and starting
    the session.

    When ``from_host`` is None, the client FQDN is used instead.
    See `RFC 5321 § 4.1.1.1`_ for details.

    Args:
        from_host (str or None): Name used to identify the client.

    Raises:
        ConnectionResetError: If the connection with the server is
            unexpectedly lost.
        SMTPCommandFailedError: If the server refuses our EHLO greeting.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server
            response.

    .. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1
    """
    # Default to the client FQDN when no explicit identity was given.
    host = self.fqdn if from_host is None else from_host

    reply = await self.do_cmd("EHLO", host)
    code, message = reply

    # Remember the greeting and the capabilities the server advertised.
    self.last_ehlo_response = reply
    self.esmtp_extensions, self.auth_mechanisms = SMTP.parse_esmtp_extensions(message)
    self.supports_esmtp = True

    return code, message
|
def add_user_rating(self, item_type, item_id, item_rating):
    """Add the current user's rating for the given item.

    :param item_type: One of: series, episode, banner.
    :param item_id: The TheTVDB id of the item.
    :param item_rating: The rating from 0 to 10.
    :return: the parsed API response
    """
    path = '/user/ratings/%s/%d/%d' % (item_type, item_id, item_rating)
    raw_response = requests_util.run_request(
        'put',
        self.API_BASE_URL + path,
        headers=self.__get_header_with_auth())
    return self.parse_raw_response(raw_response)
|
def run(inputs, program, outputs):
    """Create a temp symlink tree, run *program*, and copy back outputs.

    Args:
        inputs: List of (fake path, real path) pairs used for the symlink tree.
        program: List containing the real path of the program and its
            arguments. The execroot directory is appended as the last argument.
        outputs: List of (fake outputted path, real path) pairs to copy back.

    Returns:
        0 if succeeded or nonzero if failed.
    """
    root = tempfile.mkdtemp()
    try:
        cwd = os.getcwd()
        for fake, real in inputs:
            link = os.path.join(root, fake)
            parent = os.path.join(root, os.path.dirname(fake))
            if not os.path.exists(parent):
                os.makedirs(parent)
            # Prefer symlinks, except on Windows where symlinks exist but
            # require administrator privileges to use.
            if hasattr(os, 'symlink') and os.name != 'nt':
                os.symlink(os.path.join(cwd, real), link)
            else:
                shutil.copyfile(os.path.join(cwd, real), link)
        if subprocess.call(program + [root]) != 0:
            return 1
        for fake, real in outputs:
            shutil.copyfile(os.path.join(root, fake), real)
        return 0
    finally:
        try:
            shutil.rmtree(root)
        except EnvironmentError:
            # Ignore "file in use" errors on Windows; ok since it's a tmpdir.
            pass
|
def create_shipping_address(self, shipping_address):
    """Create a shipping address on an existing account.

    If you are creating an account, you can instead embed the shipping
    addresses in that request.
    """
    endpoint = urljoin(self._url, '/shipping_addresses')
    return shipping_address.post(endpoint)
|
def should_do_final_get(self):
    """Check whether the polling should end doing a final GET.

    :rtype: bool
    """
    # PUT/PATCH that ran asynchronously (or returned no resource) needs a
    # final GET to fetch the resulting resource state.
    modified = (self.async_url or not self.resource) and self.method in {'PUT', 'PATCH'}
    if modified:
        return True
    # POST with final-state-via=location and both polling headers present.
    return (self.lro_options['final-state-via'] == _LOCATION_FINAL_STATE
            and self.location_url and self.async_url and self.method == 'POST')
|
def fit(self, X, y=None, **fit_params):
    """Fit the model.

    Fits all the transforms one after the other, transforms the data,
    then fits the transformed data using the final estimator.

    Parameters
    ----------
    X : iterable
        Training data. Must fulfill input requirements of the first step
        of the pipeline.
    y : iterable, default=None
        Training targets. Must fulfill label requirements for all steps
        of the pipeline.
    **fit_params : dict of string -> object
        Parameters passed to the ``fit`` method of each step, where each
        parameter name is prefixed such that parameter ``p`` for step
        ``s`` has key ``s__p``.

    Returns
    -------
    self : Pipeline
        This estimator.
    """
    Xt, yt, fit_params = self._fit(X, y, **fit_params)
    self.N_train = len(yt)

    final = self._final_estimator
    if final is not None:
        result = final.fit(Xt, yt, **fit_params)
        # Keep the training history when the estimator exposes one
        # (e.g. Keras-style models).
        if hasattr(result, 'history'):
            self.history = result
    return self
|
def calculate_heartbeats(shb, chb):
    """Negotiate effective heartbeat settings from server and client values.

    :param (str, str) shb: server heartbeat numbers (as strings)
    :param (int, int) chb: client heartbeat numbers
    :rtype: (int, int)
    """
    server_send, server_recv = shb
    client_send, client_recv = chb

    send_interval = 0
    recv_interval = 0
    # Each direction is enabled only when both sides want it; the agreed
    # value is the larger (i.e. slower) of the two.
    if client_send != 0 and server_recv != '0':
        send_interval = max(client_send, int(server_recv))
    if client_recv != 0 and server_send != '0':
        recv_interval = max(client_recv, int(server_send))
    return send_interval, recv_interval
|
def add_to_manifest(self, manifest):
    """Record this service's details in an app manifest.

    :param manifest: A predix.admin.app.Manifest object instance that
        manages reading/writing manifest config for a cloud foundry app.
    """
    # Register the service itself.
    manifest.add_service(self.service.name)

    # Expose the zone id and URI as environment variables.
    settings = self.service.settings.data
    zone_key = predix.config.get_env_key(self.use_class, 'zone_id')
    manifest.add_env_var(zone_key, settings['zone']['http-header-value'])
    uri_key = predix.config.get_env_key(self.use_class, 'uri')
    manifest.add_env_var(uri_key, settings['uri'])

    manifest.write_manifest()
|
def create_fct_file(self):
    '''Emit examples in fct format.'''
    # The whole document is just the fct record of the target table.
    parts = [self.fct_rec(self.db.target_table)]
    return ''.join(parts)
|
def delete(self, adjustEstimate=None, newEstimate=None, increaseBy=None):
    """Delete this worklog entry from its associated issue.

    :param adjustEstimate: one of ``new``, ``leave``, ``manual`` or ``auto``.
        ``auto`` (the default) adjusts the estimate automatically;
        ``leave`` keeps the estimate unchanged by this deletion.
    :param newEstimate: combined with ``adjustEstimate=new``, set the
        estimate to this value
    :param increaseBy: combined with ``adjustEstimate=manual``, increase
        the remaining estimate by this amount
    """
    # Only forward the parameters that were actually supplied.
    candidates = {
        'adjustEstimate': adjustEstimate,
        'newEstimate': newEstimate,
        'increaseBy': increaseBy,
    }
    params = {key: value for key, value in candidates.items() if value is not None}
    super(Worklog, self).delete(params)
|
def handle_argv(self, prog_name, argv):
    """Parse command line arguments from ``argv`` and dispatch to :meth:`run`.

    :param prog_name: The program name (``argv[0]``).
    :param argv: Command arguments.
    """
    options, positionals = self.parse_options(prog_name, argv)
    # Positionals become *args; parsed options become **kwargs.
    return self.run(*positionals, **vars(options))
|
def set_env(user, name, value=None):
    '''Set up an environment variable in the crontab.

    CLI Example:

    .. code-block:: bash

        salt '*' cron.set_env root MAILTO user@example.com
    '''
    tab = list_tab(user)
    for entry in tab['env']:
        if entry['name'] != name:
            continue
        if entry['value'] == value:
            # Same name with the same value is already present.
            return 'present'
        # Stale value: remove the variable and re-add it with the new value.
        rm_env(user, name)
        jret = set_env(user, name, value)
        return 'updated' if jret == 'new' else jret

    # Variable not present yet: append it and write the crontab back.
    tab['env'].append({'name': name, 'value': value})
    comdat = _write_cron_lines(user, _render_tab(tab))
    if comdat['retcode']:
        # Failed to commit, return the error
        return comdat['stderr']
    return 'new'
|
def get_word_before_cursor(self, WORD=False):
    """Return the word before the cursor.

    Returns an empty string when the character directly before the
    cursor is whitespace (or when there is no text at all).
    """
    text = self.text_before_cursor
    if text[-1:].isspace():
        return ''
    start = self.find_start_of_previous_word(WORD=WORD)
    return text[start:]
|
def add_to_stage(self, paths):
    """Stage the given files.

    :param paths: paths to add to the VCS staging area
    :return: None
    :raises errors.VCSError: if the underlying VCS command fails
    """
    command = self._command.add(paths)
    code, stdout, stderr = self._exec(command)
    if code:
        raise errors.VCSError('Can\'t add paths to VCS. Process exited with code %d and message: %s' % (code, stderr + stdout))
|
def make_gettext_patterns():
    "Strongly inspired from idlelib.ColorDelegator.make_pat"
    # NOTE: ``any`` below is this module's named-group pattern helper,
    # not the builtin.
    keywords = r"\b" + any("keyword", 'msgid msgstr'.split()) + r"\b"
    fuzzy_pat = any("builtin", [r"#,[^\n]*"])
    link_pat = any("normal", [r"#:[^\n]*"])
    comment_pat = any("comment", [r"#[^\n]*"])
    number_pat = any("number", [r"\b[+-]?[0-9]+[lL]?\b", r"\b[+-]?0[xX][0-9A-Fa-f]+[lL]?\b", r"\b[+-]?[0-9]+(?:\.[0-9]+)?(?:[eE][+-]?[0-9]+)?\b"])
    single_quoted = r"(\b[rRuU])?'[^'\\\n]*(\\.[^'\\\n]*)*'?"
    double_quoted = r'(\b[rRuU])?"[^"\\\n]*(\\.[^"\\\n]*)*"?'
    string_pat = any("string", [single_quoted, double_quoted])
    return "|".join([keywords, string_pat, number_pat, fuzzy_pat,
                     link_pat, comment_pat, any("SYNC", [r"\n"])])
|
def _preprocess_sqlite_view(asql_query, library, backend, connection):
    """Find a (materialized) view in the asql query and convert it to
    create table / insert rows statements.

    Note:
        Assumes virtual tables for all partitions are already created.

    Args:
        asql_query (str): asql query
        library (ambry.Library):
        backend (SQLiteBackend): unused here; kept for interface parity.
        connection (apsw.Connection): unused here; kept for interface parity.

    Returns:
        str: valid sql query containing create table and insert into queries
            if asql_query contains 'create materialized view' (or
            'create view'); otherwise asql_query unchanged.
    """
    new_query = None

    if 'create materialized view' in asql_query.lower() or 'create view' in asql_query.lower():
        logger.debug('_preprocess_sqlite_view: materialized view found.\n asql query: {}'.format(asql_query))
        view = parse_view(asql_query)

        # Normalise the view name into a valid sqlite table name.
        tablename = view.name.replace('-', '_').lower().replace('.', '_')

        ref_to_partition_map = {}  # key is ref found in the query, value is Partition instance.
        alias_to_partition_map = {}  # key is alias of ref found in the query, value is Partition instance.

        # collect sources from select statement of the view.
        for source in view.sources:
            partition = library.partition(source.name)
            ref_to_partition_map[source.name] = partition
            if source.alias:
                alias_to_partition_map[source.alias] = partition

        # collect sources from joins of the view.
        for join in view.joins:
            partition = library.partition(join.source.name)
            ref_to_partition_map[join.source.name] = partition
            if join.source.alias:
                alias_to_partition_map[join.source.alias] = partition

        # collect and convert columns.
        TYPE_MAP = {
            'int': 'INTEGER',
            'float': 'REAL',
            six.binary_type.__name__: 'TEXT',
            six.text_type.__name__: 'TEXT',
            'date': 'DATE',
            'datetime': 'TIMESTAMP WITHOUT TIME ZONE'}
        column_types = []
        column_names = []
        for column in view.columns:
            if '.' in column.name:
                source_alias, column_name = column.name.split('.')
            else:
                # TODO: Test that case.
                source_alias = None
                column_name = column.name

            # find column specification in the mpr file.
            if source_alias:
                partition = alias_to_partition_map[source_alias]
                for part_column in partition.datafile.reader.columns:
                    if part_column['name'] == column_name:
                        sqlite_type = TYPE_MAP.get(part_column['type'])
                        if not sqlite_type:
                            # BUG FIX: the original formatted ``column['type']``,
                            # which raises TypeError because ``column`` is not a
                            # mapping; the unknown type lives in ``part_column``.
                            raise Exception(
                                'Do not know how to convert {} to sql column.'
                                .format(part_column['type']))
                        column_types.append('    {} {}'.format(column.alias if column.alias else column.name, sqlite_type))
                        column_names.append(column.alias if column.alias else column.name)

        column_types_str = ',\n'.join(column_types)
        column_names_str = ', '.join(column_names)
        create_query = 'CREATE TABLE IF NOT EXISTS {}(\n{});'.format(tablename, column_types_str)

        # drop 'create materialized view' part
        _, select_part = asql_query.split(view.name)
        select_part = select_part.strip()
        assert select_part.lower().startswith('as')

        # drop 'as' keyword
        select_part = select_part.strip()[2:].strip()
        assert select_part.lower().strip().startswith('select')

        # Create query to copy data from mpr to just created table.
        copy_query = 'INSERT INTO {table}(\n{columns})\n {select}'.format(table=tablename, columns=column_names_str, select=select_part)
        if not copy_query.strip().lower().endswith(';'):
            copy_query = copy_query + ';'

        new_query = '{}\n\n{}'.format(create_query, copy_query)
    logger.debug('_preprocess_sqlite_view: preprocess finished.\n asql query: {}\n\n new query: {}'.format(asql_query, new_query))
    return new_query or asql_query
|
def module_refresh(self, force_refresh=False, notify=False):
    '''Refresh the functions and returners.'''
    log.debug('Refreshing modules. Notify=%s', notify)
    funcs, returners, _, executors = self._load_modules(force_refresh, notify=notify)
    self.functions = funcs
    self.returners = returners
    self.executors = executors
    # Keep the scheduler in sync with the freshly loaded modules.
    self.schedule.functions = self.functions
    self.schedule.returners = self.returners
|
def _utilized ( n , node , other_attrs , unsuppressedPrefixes ) :
'''_ utilized ( n , node , other _ attrs , unsuppressedPrefixes ) - > boolean
Return true if that nodespace is utilized within the node'''
|
if n . startswith ( 'xmlns:' ) :
n = n [ 6 : ]
elif n . startswith ( 'xmlns' ) :
n = n [ 5 : ]
if ( n == "" and node . prefix in [ "#default" , None ] ) or n == node . prefix or n in unsuppressedPrefixes :
return 1
for attr in other_attrs :
if n == attr . prefix :
return 1
# For exclusive need to look at attributes
if unsuppressedPrefixes is not None :
for attr in _attrs ( node ) :
if n == attr . prefix :
return 1
return 0
|
def validate(self, schema):
    """Validate this VDOM against the given JSON Schema.

    Raises ValidationError if the schema does not match.
    """
    data = self.to_dict()
    try:
        validate(instance=data, schema=schema, cls=Draft4Validator)
    except ValidationError as err:
        # Re-raise with a message pointing at the canonical VDOM schema.
        raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, err))
|
def getRecentlyUpdatedSets(self, minutesAgo):
    '''Get the information of recently updated sets.

    :param int minutesAgo: The amount of time ago that the set was updated.
    :returns: A list of Build instances updated within the given time;
        an empty list if no sets changed in that window.
    :rtype: list
    '''
    url = Client.ENDPOINT.format('getRecentlyUpdatedSets')
    returned = get(url, params={'apiKey': self.apiKey, 'minutesAgo': minutesAgo})
    self.checkResponse(returned)
    # Parse the XML payload into Build objects.
    root = ET.fromstring(returned.text)
    return [Build(child, self) for child in root]
|
def _new_conn(self):
    """Establish a new connection via the SOCKS proxy.

    :returns: the connected socket object.
    :raises ConnectTimeoutError: when the connection (directly or via the
        proxy) times out.
    :raises NewConnectionError: for any other proxy or socket failure.
    """
    extra_kw = {}
    # Optional socket tuning is forwarded only when configured.
    if self.source_address:
        extra_kw['source_address'] = self.source_address

    if self.socket_options:
        extra_kw['socket_options'] = self.socket_options

    try:
        conn = socks.create_connection((self.host, self.port), proxy_type=self._socks_options['socks_version'], proxy_addr=self._socks_options['proxy_host'], proxy_port=self._socks_options['proxy_port'], proxy_username=self._socks_options['username'], proxy_password=self._socks_options['password'], proxy_rdns=self._socks_options['rdns'], timeout=self.timeout, **extra_kw)

    except SocketTimeout as e:
        raise ConnectTimeoutError(self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout))

    except socks.ProxyError as e:
        # This is fragile as hell, but it seems to be the only way to raise
        # useful errors here. Unwrap the underlying socket error (if any)
        # so timeouts map to ConnectTimeoutError like a direct connection.
        if e.socket_err:
            error = e.socket_err
            if isinstance(error, SocketTimeout):
                raise ConnectTimeoutError(self, "Connection to %s timed out. (connect timeout=%s)" % (self.host, self.timeout))
            else:
                raise NewConnectionError(self, "Failed to establish a new connection: %s" % error)
        else:
            raise NewConnectionError(self, "Failed to establish a new connection: %s" % e)

    except SocketError as e:  # Defensive: PySocks should catch all these.
        raise NewConnectionError(self, "Failed to establish a new connection: %s" % e)

    return conn
|
def _update_field ( self , natvalue ) :
"""Update this NATValue if values are different
: rtype : bool"""
|
updated = False
if natvalue . element and natvalue . element != self . element :
self . update ( element = natvalue . element )
self . pop ( 'ip_descriptor' , None )
updated = True
elif natvalue . ip_descriptor and self . ip_descriptor and natvalue . ip_descriptor != self . ip_descriptor :
self . update ( ip_descriptor = natvalue . ip_descriptor )
self . pop ( 'element' , None )
updated = True
for port in ( 'min_port' , 'max_port' ) :
_port = getattr ( natvalue , port , None )
if _port is not None and getattr ( self , port , None ) != _port :
self [ port ] = _port
updated = True
return updated
|
def combine_dictionaries(dicts: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Merge a list of dictionaries into a single dictionary.

    On key collisions the value from the *earliest* dictionary in the
    list wins (the same semantics as ``collections.ChainMap``).
    """
    merged: Dict[str, Any] = {}
    # Apply later dicts first so earlier entries overwrite on collision.
    for mapping in reversed(dicts):
        merged.update(mapping)
    return merged
|
def register_piece(self, from_address, to_address, hash, password, min_confirmations=6, sync=False, ownership=True):
    """Register a piece.

    Args:
        from_address (Tuple[str]): Federation address. All register
            transactions originate from the Federation wallet.
        to_address (str): Address registering the edition.
        hash (Tuple[str]): Hash of the piece: (file_hash, file_hash_metadata).
        password (str): Federation wallet password, used to sign the
            transaction.
        min_confirmations (int): Override the number of confirmations when
            choosing the inputs of the transaction. Defaults to 6.
        sync (bool): Perform the transaction in synchronous mode: block
            until there is at least one confirmation on the blockchain.
            Defaults to False.
        ownership (bool): Check ownership in the blockchain before pushing
            the transaction. Defaults to True.

    Returns:
        str: transaction id
    """
    file_hash, file_hash_metadata = hash
    path, from_address = from_address
    verb = Spoolverb()
    # A piece registration carries the two file hashes plus the receiver.
    unsigned_tx = self.simple_spool_transaction(
        from_address,
        [file_hash, file_hash_metadata, to_address],
        op_return=verb.piece,
        min_confirmations=min_confirmations)
    signed_tx = self._t.sign_transaction(unsigned_tx, password)
    return self._t.push(signed_tx)
|
def _zforce ( self , R , z , phi = 0. , t = 0. , v = None ) :
"""NAME :
_ zforce
PURPOSE :
evaluate the vertical force for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
v = current velocity in cylindrical coordinates
OUTPUT :
the vertical force
HISTORY :
2018-03-18 - Started - Bovy ( UofT )"""
|
new_hash = hashlib . md5 ( numpy . array ( [ R , phi , z , v [ 0 ] , v [ 1 ] , v [ 2 ] , t ] ) ) . hexdigest ( )
if new_hash != self . _force_hash :
self . _calc_force ( R , phi , z , v , t )
return self . _cached_force * v [ 2 ]
|
def faz(input_file, variables=None):
    """FAZ entry point."""
    logging.debug("input file:\n {0}\n".format(input_file))
    task_list = parse_input_file(input_file, variables=variables)
    print("Found {0} tasks.".format(len(task_list)))
    # Build the dependency graph, report it, then run every task in order.
    dep_graph = DependencyGraph(task_list)
    dep_graph.show_tasks()
    dep_graph.execute()
|
async def create(cls, fsm_context: FSMContext):
    """Build a proxy bound to *fsm_context* with its state pre-loaded.

    :param fsm_context:
    :return: the initialized proxy instance
    """
    instance = cls(fsm_context)
    await instance.load()
    return instance
|
def size(self):
    """Returns the sizes of the groups as series.

    Returns:
        TYPE: Description
    """
    if len(self.grouping_column_types) > 1:
        # Multi-key groupby: the index is a struct of the key types.
        index_type = WeldStruct([self.grouping_column_types])
        # Figure out what to use for multi-key index name
        # index_name = ??
        # NOTE(review): this branch never assigns ``index_name``, so the
        # SeriesWeld call below raises NameError for multi-key groupbys —
        # confirm intended behavior / fix upstream.
    else:
        index_type = self.grouping_column_types[0]
        index_name = self.grouping_column_names[0]
    return SeriesWeld(grizzly_impl.groupby_size(self.columns, self.column_types, self.grouping_columns, self.grouping_column_types), WeldLong(), index_type=index_type, index_name=index_name)
|
def _add(self, uri, methods, handler, host=None):
    """Add a handler to the route list.

    :param uri: path to match
    :param methods: sequence of accepted method names. If none are
        provided, any method is allowed
    :param handler: request handler function.
        When executed, it should provide a response object.
    :param host: optional host string (or iterable of host strings) that
        scopes the route to specific virtual hosts
    :return: Nothing
    """
    if host is not None:
        if isinstance(host, str):
            # Host-scoped routes are stored with the host prepended so
            # the same path can exist under different hosts.
            uri = host + uri
            self.hosts.add(host)
        else:
            if not isinstance(host, Iterable):
                raise ValueError("Expected either string or Iterable of " "host strings, not {!r}".format(host))
            # Register the same route once per host, then stop.
            for host_ in host:
                self.add(uri, methods, handler, host_)
            return
    # Dict for faster lookups of if method allowed
    if methods:
        methods = frozenset(methods)

    parameters = []
    properties = {"unhashable": None}

    def add_parameter(match):
        # Turn one "<name:type:pattern>" URI token into a capture group,
        # recording the parameter for later casting.
        name = match.group(1)
        name, _type, pattern = self.parse_parameter_string(name)

        parameter = Parameter(name=name, cast=_type)
        parameters.append(parameter)

        # Mark the whole route as unhashable if it has the hash key in it
        # NOTE(review): both regex branches below trigger on a '/' in the
        # parameter pattern; the first additionally requires a preceding
        # non-'^' char (or start). Presumably this detects path-consuming
        # parameters — confirm against upstream Sanic router behavior.
        if re.search(r'(^|[^^]){1}/', pattern):
            properties['unhashable'] = True
        # Mark the route as unhashable if it matches the hash key
        elif re.search(r'/', pattern):
            properties['unhashable'] = True
        return '({})'.format(pattern)

    pattern_string = re.sub(self.parameter_pattern, add_parameter, uri)
    pattern = re.compile(r'^{}$'.format(pattern_string))

    def merge_route(route, methods, handler):
        # merge to the existing route when possible.
        if not route.methods or not methods:
            # method-unspecified routes are not mergeable.
            raise RouteExists("Route already registered: {}".format(uri))
        elif route.methods.intersection(methods):
            # already existing method is not overloadable.
            duplicated = methods.intersection(route.methods)
            raise RouteExists("Route already registered: {} [{}]".format(uri, ','.join(list(duplicated))))
        # Wrap both handlers in a composition view that dispatches by method.
        if isinstance(route.handler, self._composition_view_class):
            view = route.handler
        else:
            view = self._composition_view_class()
            view.add(route.methods, route.handler)
        view.add(methods, handler)
        route = route._replace(handler=view, methods=methods.union(route.methods))
        return route

    if parameters:
        # TODO: This is too complex, we need to reduce the complexity
        if properties['unhashable']:
            routes_to_check = self.routes_always_check
            ndx, route = self.check_dynamic_route_exists(pattern, routes_to_check)
        else:
            routes_to_check = self.routes_dynamic[url_hash(uri)]
            ndx, route = self.check_dynamic_route_exists(pattern, routes_to_check)
        if ndx != -1:
            # Pop the ndx of the route, no dups of the same route
            routes_to_check.pop(ndx)
    else:
        route = self.routes_all.get(uri)

    if route:
        route = merge_route(route, methods, handler)
    else:
        # prefix the handler name with the blueprint name
        # if available
        if hasattr(handler, '__blueprintname__'):
            handler_name = '{}.{}'.format(handler.__blueprintname__, handler.__name__)
        else:
            handler_name = getattr(handler, '__name__', None)

        route = Route(handler=handler, methods=methods, pattern=pattern, parameters=parameters, name=handler_name, uri=uri)

    # File the route in the appropriate lookup structure: always-check
    # (unhashable patterns), dynamic (parameterized), or static.
    self.routes_all[uri] = route
    if properties['unhashable']:
        self.routes_always_check.append(route)
    elif parameters:
        self.routes_dynamic[url_hash(uri)].append(route)
    else:
        self.routes_static[uri] = route
|
def set_memcached_backend(self, config):
    """Select the most suitable Memcached backend based on the config and
    on what's installed.
    """
    # Preferred backend: the fastest and most fully featured.
    config['BACKEND'] = 'django_pylibmc.memcached.PyLibMCCache'
    if is_importable(config['BACKEND']):
        return
    # Binary connections can use this pure Python implementation.
    if config.get('BINARY') and is_importable('django_bmemcached'):
        config['BACKEND'] = 'django_bmemcached.memcached.BMemcached'
        return
    # Text-based connections without authentication can fall back to
    # Django's core backends if the supporting libraries are installed.
    needs_auth = any(config.get(key) for key in ('BINARY', 'USERNAME', 'PASSWORD'))
    if not needs_auth:
        if is_importable('pylibmc'):
            config['BACKEND'] = 'django.core.cache.backends.memcached.PyLibMCCache'
        elif is_importable('memcached'):
            config['BACKEND'] = 'django.core.cache.backends.memcached.MemcachedCache'
|
def define_operators(cls, operators):
    """Bind operators to specified functions for the scope of the context.

    Example:
        model = Model()
        other = Model()
        with Model.define_operators({"+": lambda self, other: "plus"}):
            print(model + other)   # "plus"
        print(model + other)       # Raises TypeError — binding limited to
                                   # scope of with block.
    """
    old_ops = dict(cls._operators)
    for op, func in operators.items():
        cls._operators[op] = func
    try:
        yield
    finally:
        # BUG FIX: restore even when the with-body raises, so a failure
        # cannot leak the temporary operator bindings.
        cls._operators = old_ops
|
def wait_for_instance(instance):
    """Wait for the instance status to become 'running'.

    Returns True once 'running' is reached, False for any other final
    status.
    """
    print("getting status for instance {} ...".format(instance.id))
    # Right after launch the API may briefly return nothing (or raise);
    # keep polling until we get a real status.
    status = None
    while True:
        try:
            status = instance.update()
        except EC2ResponseError:
            status = None
        if status is not None:
            break
        time.sleep(2)
    print("waiting for instance {} ...".format(instance.id))
    # Poll until the instance leaves the transient 'pending' state.
    while status == "pending":
        time.sleep(2)
        status = instance.update()
    if status != "running":
        print("Invalid status when starting instance {}: {}".format(instance.id, status))
        return False
    print("New instance {} started: {}".format(instance.id, instance.ip_address))
    return True
|
def push(self, my_dict, key, element):
    '''Push an element onto an array that may not have been defined in
    the dict.'''
    bucket = my_dict.setdefault(key, [])
    if isinstance(bucket, dict):
        # Group entries keep their members under a 'hosts' list.
        bucket.setdefault('hosts', []).append(element)
    else:
        bucket.append(element)
|
def generate_menu():
    """Generate ``menu`` with the rebuild link.

    :return: HTML fragment
    :rtype: str
    """
    if db is not None:
        needs_rebuild = db.get('site:needs_rebuild')
    else:
        needs_rebuild = site.coil_needs_rebuild
    # '0'/'-1' (str or bytes) mean no rebuild is pending; anything else
    # gets the highlighted warning link.
    if needs_rebuild in (u'0', u'-1', b'0', b'-1'):
        return ('</li><li><a href="{0}"><i class="fa fa-fw '
                'fa-cog"></i> Rebuild</a></li>'.format(url_for('rebuild')))
    return ('</li><li><a href="{0}"><i class="fa fa-fw '
            'fa-warning"></i> <strong>Rebuild</strong></a></li>'.format(url_for('rebuild')))
|
def itoint(f, G, y0, tspan):
    """Numerically integrate the Ito equation dy = f(y,t) dt + G(y,t) dW,
    where y is the d-dimensional state vector, f is a vector-valued function,
    G is a d x m matrix-valued function giving the noise coefficients and
    dW(t) = (dW_1, dW_2, ... dW_m) is a vector of independent Wiener
    increments.

    Args:
        f: callable(y, t) returning a numpy array of shape (d,).
            Deterministic part of the system.
        G: callable(y, t) returning a numpy array of shape (d, m).
            Noise coefficients of the system.
        y0: array of shape (d,) giving the initial state vector y(t==0).
        tspan (array): Equally spaced time points to solve for y, e.g.
            np.arange(0, 10, 0.005); tspan[0] is the initial time for y0.

    Returns:
        y: array of shape (len(tspan), len(y0)), with y0 in the first row.

    Raises:
        SDEValueError
    """
    # In future versions we can automatically choose here the most suitable
    # Ito algorithm based on properties of the system and noise.
    (d, m, f, G, y0, tspan, __, __) = _check_args(f, G, y0, tspan, None, None)
    return itoSRI2(f, G, y0, tspan)
|
def getnamedargs(*args, **kwargs):
    """Flatten positional dicts and named args into one dict.

    Allows passing ({'a': 5, 'b': 3}, c=8) and getting dict(a=5, b=3, c=8).
    """
    merged = {}
    # Fold in every positional dict; non-dict positionals are ignored.
    for candidate in args:
        if isinstance(candidate, dict):
            merged.update(candidate)
    # Explicit keyword arguments win over positional dict entries.
    merged.update(kwargs)
    return merged
|
def taxonomy_from_node_name(self, node_name):
    '''Return the taxonomy incorporated at a particular node, or None
    if it does not encode any taxonomy.

    Parameters
    ----------
    node_name: str
        a node label.

    Returns
    -------
    Taxonomy as a string, or None if there is no taxonomy (the label is
    absent or is a bare bootstrap value).
    '''
    if node_name is None:
        return None
    # A label that parses as a float is just a bootstrap value.
    try:
        float(node_name)
        return None
    except ValueError:
        pass
    # Strip a leading "<bootstrap>:" prefix when present.
    match = re.compile(r'[\d\.]+:(.*)').match(node_name)
    if match:
        return match.groups(0)[0]
    return node_name
|
def get_jwt_data_from_app_context():
    """Fetch the dict of jwt token data from the top of the flask app's
    context."""
    top = flask._app_ctx_stack.top
    jwt_data = getattr(top, 'jwt_data', None)
    # Only set when @auth_required ran before this point in the request.
    PraetorianError.require_condition(
        jwt_data is not None,
        """
        No jwt_data found in app context.
        Make sure @auth_required decorator is specified *first* for route
        """,
    )
    return jwt_data
|
def capture_or_cache(target_url, user_agent="savepagenow (https://github.com/pastpages/savepagenow)"):
    """Archive *target_url* using archive.org's Wayback Machine, unless the
    page has been recently captured.

    Returns a (archive_url, fresh_capture) tuple: ``fresh_capture`` is
    True when archive.org conducted a new capture, False when it returned
    a recently cached capture instead (likely taken in the previous
    minutes).
    """
    try:
        archive_url = capture(target_url, user_agent=user_agent, accept_cache=False)
    except CachedPage:
        # archive.org declined to recapture; accept the cached snapshot.
        return capture(target_url, user_agent=user_agent, accept_cache=True), False
    return archive_url, True
|
def eager_partial_regardless(self, fn, *a, **kw):
    """Like `eager_partial`, but falls back to a plain partial when the
    callable is not annotated."""
    if not self.has_annotations(fn):
        return functools.partial(fn, *a, **kw)
    return self.eager_partial(fn, *a, **kw)
|
def _set_sflow_profile(self, v, load=False):
    """Setter method for sflow_profile, mapped from YANG variable /sflow_profile (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_sflow_profile is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_sflow_profile() directly.
    """
    # Normalise the incoming value through its declared user type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; this raises on
        # any type mismatch.
        t = YANGDynClass(v, base=YANGListType("profile_name", sflow_profile.sflow_profile, yang_name="sflow-profile", rest_name="sflow-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='profile-name', extensions={u'tailf-common': {u'info': u'Sflow Profile Configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'SflowProfile'}}), is_container='list', yang_name="sflow-profile", rest_name="sflow-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Profile Configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'SflowProfile'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the full generated-type description for diagnostics.
        raise ValueError({'error-string': """sflow_profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("profile_name",sflow_profile.sflow_profile, yang_name="sflow-profile", rest_name="sflow-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='profile-name', extensions={u'tailf-common': {u'info': u'Sflow Profile Configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'SflowProfile'}}), is_container='list', yang_name="sflow-profile", rest_name="sflow-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Sflow Profile Configuration', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'SflowProfile'}}, namespace='urn:brocade.com:mgmt:brocade-sflow', defining_module='brocade-sflow', yang_type='list', is_config=True)""", })

    self.__sflow_profile = t
    # Give subclasses a chance to react to the assignment.
    if hasattr(self, '_set'):
        self._set()
|
def poll(self):
    """Wait for packets to send to the client.

    Blocks up to ``self.server.ping_timeout`` seconds for the first
    packet, then drains any additional queued packets without blocking.

    :returns: list of queued packets, or ``[]`` if the queue yielded the
        ``None`` shutdown sentinel.
    :raises exceptions.QueueEmpty: if no packet arrives within the timeout.
    """
    empty_exc = self.server.get_queue_empty_exception()
    try:
        first = self.queue.get(timeout=self.server.ping_timeout)
    except empty_exc:
        raise exceptions.QueueEmpty()
    self.queue.task_done()
    collected = [first]
    # A lone None signals "nothing to send".
    if collected == [None]:
        return []
    # Drain whatever else is immediately available, without blocking.
    while True:
        try:
            item = self.queue.get(block=False)
        except empty_exc:
            break
        collected.append(item)
        self.queue.task_done()
    return collected
|
def upload_from_fs(fn, profile=None, label=None):
    """Save the image at *fn* with a TMP prefix and return its img_id.

    :param fn: path to the image file on the local filesystem.
    :param profile: name of the upload profile configuration; defaults
        to ``'default'``.
    :param label: optional label to attach to the uploaded image.
    :raises ValueError: if *fn* is not an existing file.
    :raises RuntimeError: if the file is not an image of an allowed type.
    """
    if not os.path.isfile(fn):
        # Fixed grammar of the error message ("File is not exists").
        raise ValueError('File does not exist: {}'.format(fn))
    if profile is None:
        profile = 'default'
    conf = get_profile_configs(profile)
    with open(fn, 'rb') as f:
        if not is_image(f, types=conf['TYPES']):
            allowed = ', '.join(t.upper() for t in conf['TYPES'])
            # Fixed grammar of the error message ("Allowed formats is").
            msg = ('Format of uploaded file "%(name)s" is not allowed. '
                   'Allowed formats are: %(formats)s.'
                   % {'name': fn, 'formats': allowed})
            raise RuntimeError(msg)
        return _custom_upload(f, profile, label, conf)
|
def get_plan(self, nodes=None):
    """Retrieve a plan, i.e. a list of fixtures to be loaded, sorted on
    dependency.

    :param list nodes: optional list of nodes to be loaded; when falsy
        the whole graph is resolved.
    :return: the resolved load order.
    """
    if not nodes:
        return self.graph.resolve_node()
    return self.graph.resolve_nodes(nodes)
|
def solve_sparse(self, B):
    """Solve linear equation of the form A X = B, where B and X are sparse.

    Parameters
    ----------
    B : any scipy.sparse matrix
        Right-hand side of the matrix equation.
        Note: it will be converted to csc_matrix via `.tocsc()`.

    Returns
    -------
    X : csc_matrix
        Solution to the matrix equation as a csc_matrix.
    """
    B = B.tocsc()
    # Solve column-by-column and reassemble. Bug fix: `xrange` is
    # Python-2-only and raises NameError on Python 3; `range` works on both.
    cols = [csc_matrix(self.solve(B[:, j])) for j in range(B.shape[1])]
    return hstack(cols)
|
def update_universe(id_or_symbols):
    """Replace the set of instruments currently tracked (e.g. the stock pool).

    The change takes effect when the next bar event fires. Note that this is
    an overwrite, not an incremental addition: if the current pool is
    ['000001.XSHE', '000024.XSHE'] and ``update_universe(['000030.XSHE'])``
    is called, the pool becomes just 000030.XSHE, and subsequent data updates
    track only that single instrument.

    :param id_or_symbols: the instrument(s) to track
    :type id_or_symbols: :class:`~Instrument` object | `str` |
        List[:class:`~Instrument`] | List[`str`]
    """
    symbols = id_or_symbols
    # Accept a bare string/Instrument by normalizing it to a one-element list.
    if isinstance(symbols, (six.string_types, Instrument)):
        symbols = [symbols]
    requested = {assure_order_book_id(symbol) for symbol in symbols}
    # Only push an update when the universe actually changes.
    if requested != Environment.get_instance().get_universe():
        Environment.get_instance().update_universe(requested)
|
def list(self, source_ids=None, seniority="all", stage=None,
         date_start="1494539999", date_end=TIMESTAMP_NOW, filter_id=None,
         page=1, limit=30, sort_by='ranking', filter_reference=None,
         order_by=None):
    """Retrieve all profiles that match the query params.

    Args:
        date_end:    <string> REQUIRED (defaults to timestamp of now)
                     profiles' last date of reception
        date_start:  <string> REQUIRED (defaults to "1494539999")
                     profiles' first date of reception
        filter_id:   <string> optional filter identifier
        filter_reference: <string> optional filter reference
        limit:       <int> (defaults to 30) number of fetched profiles/page
        page:        <int> REQUIRED (defaults to 1) page number
        seniority:   <string> (defaults to "all")
                     profiles' seniority ("all", "senior", "junior")
        sort_by:     <string> sorting criterion
        order_by:    <string> optional ordering, passed through unvalidated
        source_ids:  <array of strings> REQUIRED
        stage:       <string>

    Returns:
        the profiles data as <dict>
    """
    # Validation order is preserved so the first invalid argument still
    # raises the same error as before.
    params = {
        "date_end": _validate_timestamp(date_end, "date_end"),
        "date_start": _validate_timestamp(date_start, "date_start"),
    }
    if filter_id:
        params["filter_id"] = _validate_filter_id(filter_id)
    if filter_reference:
        params["filter_reference"] = _validate_filter_reference(filter_reference)
    params["limit"] = _validate_limit(limit)
    params["page"] = _validate_page(page)
    params["seniority"] = _validate_seniority(seniority)
    params["sort_by"] = _validate_sort_by(sort_by)
    params["source_ids"] = json.dumps(_validate_source_ids(source_ids))
    params["stage"] = _validate_stage(stage)
    params["order_by"] = order_by
    response = self.client.get("profiles", params)
    return response.json()
|
def release_lock(self, verbose=VERBOSE, raiseError=RAISE_ERROR):
    """Release the lock when set and close file descriptor if opened.

    :Parameters:
        #. verbose (bool): Whether to be verbose about errors when encountered
        #. raiseError (bool): Whether to raise error exception when encountered

    :Returns:
        #. result (boolean): Whether the lock is successfully released.
        #. code (integer, Exception): Integer code indicating the reason how the
           lock was successfully or unsuccessfully released. When releasing the
           lock generates an error, this will be caught and returned in a message
           Exception code.

           *  0: Lock is not found, therefore successfully released
           *  1: Lock is found empty, therefore successfully released
           *  2: Lock is found owned by this locker and successfully released
           *  3: Lock is found owned by this locker and successfully released
              and locked file descriptor was successfully closed
           *  4: Lock is found owned by another locker, this locker has no
              permission to release it. Therefore unsuccessfully released
           *  Exception: Lock was not successfully released because of an
              unexpected error. The error is caught and returned in this
              Exception. In this case result is False.
    """
    # No lock file on disk means there is nothing to release.
    if not os.path.isfile(self.__lockPath):
        released = True
        code = 0
    else:
        try:
            with open(self.__lockPath, 'rb') as fd:
                lock = fd.readlines()
        except Exception as err:
            # Could not even read the lock file; report the error object
            # itself as the returned code.
            code = Exception("Unable to read release lock file '%s' (%s)" % (self.__lockPath, str(err)))
            released = False
            if verbose:
                print(str(code))
            if raiseError:
                raise code
        else:
            if not len(lock):
                # Lock file exists but is empty: treat as already released.
                code = 1
                released = True
            elif lock[0].rstrip() == self.__lockPass.encode():
                # First line matches our pass phrase: we own the lock, so
                # release it by truncating the file content (then fsync so
                # other processes observe the release immediately).
                try:
                    with open(self.__lockPath, 'wb') as f:  # f.write(' '.encode('utf-8'))
                        f.write(''.encode())
                        f.flush()
                        os.fsync(f.fileno())
                except Exception as err:
                    released = False
                    code = Exception("Unable to write release lock file '%s' (%s)" % (self.__lockPath, str(err)))
                    if verbose:
                        print(str(code))
                    if raiseError:
                        raise code
                else:
                    released = True
                    code = 2
            else:
                # Lock belongs to another locker: we may not release it.
                code = 4
                released = False
    # close file descriptor if lock is released and descriptor is not None
    if released and self.__fd is not None:
        try:
            if not self.__fd.closed:
                self.__fd.flush()
                os.fsync(self.__fd.fileno())
                self.__fd.close()
        except Exception as err:
            # Closing the locked-file descriptor failed; downgrade the code
            # to an Exception but keep `released` as-is.
            code = Exception("Unable to close file descriptor of locked file '%s' (%s)" % (self.__filePath, str(err)))
            if verbose:
                print(str(code))
            if raiseError:
                raise code
        else:
            code = 3
    # return
    return released, code
|
def set_display(self, brightness=100, brightness_mode="auto"):
    """Allows to modify display state (change brightness).

    :param int brightness: display brightness [0, 100] (default: 100)
    :param str brightness_mode: the brightness mode of the display
        [auto, manual] (default: auto)
    :raises ValueError: if *brightness_mode* or *brightness* is invalid.
    """
    # Validate explicitly instead of `assert`, which is silently stripped
    # when Python runs with -O.
    if brightness_mode not in ("auto", "manual"):
        raise ValueError(
            "brightness_mode must be 'auto' or 'manual', got %r" % (brightness_mode,))
    if brightness not in range(101):
        raise ValueError(
            "brightness must be an integer in [0, 100], got %r" % (brightness,))
    log.debug("setting display information...")
    cmd, url = DEVICE_URLS["set_display"]
    json_data = {"brightness_mode": brightness_mode, "brightness": brightness}
    return self._exec(cmd, url, json_data=json_data)
|
def option_in_select(browser, select_name, option):
    """Returns the Element specified by @option or None.

    Looks at the real <select> not the select2 widget, since that doesn't
    create the DOM until we click on it.
    """
    select = find_field(browser, 'select', select_name)
    assert select, "Cannot find a '{}' select.".format(select_name)
    xpath = './/option[normalize-space(text())=%s]' % string_literal(option)
    try:
        return select.find_element_by_xpath(xpath)
    except NoSuchElementException:
        return None
|
async def status(self, *args, **kwargs):
    """Get task status.

    Get task status structure from `taskId`.

    This method gives output: ``v1/task-status-response.json#``

    This method is ``stable``
    """
    endpoint_info = self.funcinfo["status"]
    return await self._makeApiCall(endpoint_info, *args, **kwargs)
|
def bind_socket(self, config):
    """:meth:`.WNetworkNativeTransportProto.bind_socket` method implementation"""
    cfg = self.__bind_socket_config
    address = config[cfg.section][cfg.address_option]
    port = config.getint(cfg.section, cfg.port_option)
    return WIPV4SocketInfo(address, port)
|
def write_named_socket(self, socket_name, socket_info):
    """A multi-tenant, named alternative to ProcessManager.write_socket()."""
    metadata_key = 'socket_{}'.format(socket_name)
    self.write_metadata_by_name(self._name, metadata_key, str(socket_info))
|
def write(cls, table, order=None, header=None, output="table",
          sort_keys=True, show_none=""):
    """Writes the information given in the table.

    :param table: the table of values
    :param order: the order of the columns
    :param header: the header for the columns
    :param output: the format (default is table, values are raw, csv,
        json, yaml, dict)
    :param sort_keys: if true the table is sorted
    :param show_none: passed along to the list or dict printer
    :return: the formatted representation, or None for empty input or an
        unsupported table type
    """
    if output == "raw":
        return table
    elif table is None:
        return None
    elif type(table) in (dict, dotdict):
        return cls.dict(table, order=order, header=header, output=output,
                        sort_keys=sort_keys, show_none=show_none)
    elif type(table) == list:
        return cls.list(table, order=order, header=header, output=output,
                        sort_keys=sort_keys, show_none=show_none)
    else:
        # Bug fix: corrected typo "unkown" -> "unknown" in the error message.
        Console.error("unknown type {0}".format(type(table)))
|
def objectify_uri(relative_uri):
    '''Converts uris from path syntax to a json-like object syntax.

    In addition, url escaped characters are unescaped, but non-ascii
    characters are romanized using the unidecode library.

    Examples:
        "/blog/3/comments" becomes "blog[3].comments"
        "car/engine/piston" becomes "car.engine.piston"
    '''
    def _segment(seg):
        # Empty segments (leading slash, doubled slashes) pass through.
        if not seg:
            return seg
        # Purely numeric segments become index syntax; others dotted attrs.
        if re.match(r'\d+$', seg):
            return '[{0}]'.format(seg)
        return '.' + seg

    # On Python 2, unquote expects bytes; on 3 it takes str directly.
    raw = relative_uri.encode('utf-8') if six.PY2 else relative_uri
    unquoted = decode(unquote(raw), 'utf-8')
    romanized = unidecode.unidecode(unquoted)
    return ''.join(_segment(seg) for seg in romanized.split('/'))
|
def save_profile_id(self, profile: Profile):
    """Store ID of profile locally.

    .. versionadded:: 4.0.6
    """
    target_dir = self.dirname_pattern.format(profile=profile.username,
                                             target=profile.username)
    os.makedirs(target_dir, exist_ok=True)
    id_path = self._get_id_filename(profile.username)
    with open(id_path, 'w') as text_file:
        text_file.write(str(profile.userid) + "\n")
    self.context.log("Stored ID {0} for profile {1}.".format(profile.userid, profile.username))
|
def ensure_path_exists(dir_path):
    """Make sure that a directory path exists.

    Creates the directory (including intermediate directories) if missing.

    :param dir_path: directory path to ensure.
    :return: True if the directory was created, False if it already existed.
    """
    import errno  # local import: keeps file-level imports untouched
    try:
        # EAFP: create directly instead of exists()-then-makedirs(), which
        # is racy when another process creates the path in between.
        os.makedirs(dir_path)
        return True
    except OSError as err:
        if err.errno == errno.EEXIST:
            return False
        raise
|
def django_admin(request):
    '''Adds additional information to the context:

    ``django_admin`` - boolean variable indicating whether the current
    page is part of the django admin or not.
    ``ADMIN_URL`` - normalized version of settings.ADMIN_URL; starts with
    a slash, ends without a slash

    NOTE: do not set ADMIN_URL='/' in case your application provides
    functionality outside of django admin as all incoming urls are
    interpreted as admin urls.
    '''
    # Normalize so admin_url always starts with '/' and never ends with '/'.
    # Bug fix: the previous version left `admin_url` unassigned when
    # ADMIN_URL was already normalized (e.g. '/admin'), raising
    # UnboundLocalError, and for values like 'admin/' it re-read the raw
    # setting so only one of the two fixes ever applied.
    admin_url = settings.ADMIN_URL
    if not admin_url.startswith('/'):
        admin_url = '/' + admin_url
    if admin_url.endswith('/'):
        admin_url = admin_url[:-1]
    # add ADMIN_URL and django_admin to context
    if request.META['PATH_INFO'].startswith(admin_url):
        return {'ADMIN_URL': admin_url, 'django_admin': True}
    else:
        return {'django_admin': False}
|
def tempo_account_get_all_account_by_customer_id(self, customer_id):
    """Get un-archived Accounts by customer.

    The Caller must have the Browse Account permission for the Account.

    :param customer_id: the Customer id.
    :return: the response of the GET request.
    """
    endpoint = 'rest/tempo-accounts/1/account/customer/{customerId}/'
    return self.get(endpoint.format(customerId=customer_id))
|
def _func_addrs_from_prologues(self):
    """Scan the entire program image for function prologues, and start code
    scanning at those positions.

    :return: A list of possible function addresses
    """
    # Pre-compile all regexes
    regexes = [re.compile(ins_regex) for ins_regex in self.project.arch.function_prologs]
    # EDG says: I challenge anyone bothering to read this to come up with a better
    # way to handle CPU modes that affect instruction decoding.
    # Since the only one we care about is ARM/Thumb right now
    # we have this gross hack. Sorry about that.
    thumb_regexes = []
    if hasattr(self.project.arch, 'thumb_prologs'):
        # Thumb prologues are found at even addrs, but their actual addr is odd!
        # Isn't that great?
        thumb_regexes = [re.compile(ins_regex) for ins_regex in self.project.arch.thumb_prologs]

    alignment = self.project.arch.instruction_alignment
    unassured_functions = []

    def _scan(regex_list, blob, start, addr_offset):
        # One pass per regex over `blob`: record mapped addresses of aligned
        # matches that land in executable memory. `addr_offset` is 1 for
        # Thumb matches (their real address is the even match address + 1).
        for regex in regex_list:
            for mo in regex.finditer(blob):
                position = mo.start() + start
                if position % alignment == 0:
                    mapped_position = AT.from_rva(position, self._binary).to_mva()
                    if self._addr_in_exec_memory_regions(mapped_position):
                        unassured_functions.append(mapped_position + addr_offset)

    # Construct the binary blob first. The previous version duplicated the
    # whole match loop for ARM and Thumb; the shared helper removes that.
    for start_, bytes_ in self._binary.memory.backers():
        _scan(regexes, bytes_, start_, 0)
        # HACK part 2: Yes, i really have to do this
        _scan(thumb_regexes, bytes_, start_, 1)

    l.info("Found %d functions with prologue scanning.", len(unassured_functions))
    return unassured_functions
|
def ssh_cmdline(self, cmd):
    """Get argument list for :meth:`subprocess.Popen()` to run ssh.

    :param cmd:
        a list of arguments to pass to ssh
    :returns:
        argument list to pass as the first argument to subprocess.Popen()

    .. note::
        you must call :meth:`connect()` at least once
        before calling this method.

    This method returns the ``args`` argument (first argument) to
    subprocess.Popen() required to execute the specified command on the
    phablet device. You can use it to construct your own connections, to
    intercept command output or to setup any additional things that you may
    require.

    .. versionadded:: 0.2
    """
    if not isinstance(cmd, list):
        raise TypeError("cmd needs to be a list")
    if any(not isinstance(item, str) for item in cmd):
        raise TypeError("cmd needs to be a list of strings")
    if self._port is None:
        raise ProgrammingError("run connect() first")
    ssh_cmd = ['ssh']
    for opt in self._get_ssh_options():
        ssh_cmd += ['-o', opt]
    ssh_cmd += ['phablet@localhost', '--']
    ssh_cmd += cmd
    _logger.debug("ssh_cmdline %r => %r", cmd, ssh_cmd)
    return ssh_cmd
|
def serialize_args(self):
    """Returns (args, kwargs) to be used when deserializing this parameter."""
    args, kwargs = super(ListParameter, self).serialize_args()
    # Prepend the element-type spec so deserialization can rebuild the
    # nested parameter type.
    args.insert(0, [self.param_type.id, self.param_type.serialize_args()])
    # Bug fix: the original never returned, so callers always got None
    # despite the documented (args, kwargs) contract.
    return args, kwargs
|
def get_axis_value_discrete(self, axis):
    """Return the axis value in discrete steps for a given axis event.

    How a value translates into a discrete step depends on the source.

    If the source is :attr:`~libinput.constant.PointerAxisSource.WHEEL`,
    the discrete value corresponds to the number of physical mouse wheel
    clicks.

    If the source is :attr:`~libinput.constant.PointerAxisSource.CONTINUOUS`
    or :attr:`~libinput.constant.PointerAxisSource.FINGER`, the discrete
    value is always 0.

    Args:
        axis (~libinput.constant.PointerAxis): The axis who's value to get.
    Returns:
        float: The discrete value for the given event.
    Raises:
        AttributeError
    """
    if self.type == EventType.POINTER_AXIS:
        return self._libinput.libinput_event_pointer_get_axis_value_discrete(
            self._handle, axis)
    raise AttributeError(_wrong_meth.format(self.type))
|
def client(whyrun=False, localmode=False, logfile=None, **kwargs):
    '''Execute a chef client run and return a dict with the stderr, stdout,
    return code, and pid.

    CLI Example:

    .. code-block:: bash

        salt '*' chef.client server=https://localhost

    server
        The chef server URL
    client_key
        Set the client key file location
    config
        The configuration file to use
    config-file-jail
        Directory under which config files are allowed to be loaded
        (no client.rb or knife.rb outside this path will be loaded).
    environment
        Set the Chef Environment on the node
    group
        Group to set privilege to
    json-attributes
        Load attributes from a JSON file or URL
    localmode
        Point chef-client at local repository if True
    log_level
        Set the log level (debug, info, warn, error, fatal)
    logfile
        Set the log file location
    node-name
        The node name for this client
    override-runlist
        Replace current run list with specified items for a single run
    pid
        Set the PID file location, defaults to /tmp/chef-client.pid
    run-lock-timeout
        Set maximum duration to wait for another client run to finish,
        default is indefinitely.
    runlist
        Permanently replace current run list with specified items
    user
        User to set privilege to
    validation_key
        Set the validation key file location, used for registering new clients
    whyrun
        Enable whyrun mode when set to True
    '''
    if logfile is None:
        logfile = _default_logfile('chef-client')
    # Base invocation; optional flags are appended below.
    cmd_args = ['chef-client', '--no-color', '--once',
                '--logfile "{0}"'.format(logfile), '--format doc']
    if whyrun:
        cmd_args.append('--why-run')
    if localmode:
        cmd_args.append('--local-mode')
    return _exec_cmd(*cmd_args, **kwargs)
|
def to_dp(self):
    """Convert to darkplaces color format.

    :return: the color escape sequence followed by the text with every
        literal '^' doubled.
    """
    escaped_text = self.text.replace('^', '^^')
    return '%s%s' % (self.color.to_dp(), escaped_text)
|
def get_nc_attrs(nc):
    """Gets netCDF file metadata attributes.

    Arguments:
        nc (netCDF4.Dataset): an open NetCDF4 Dataset to pull attributes from.
    Returns:
        dict: Metadata as extracted from the netCDF file.
    """
    ensemble = 'r{}i{}p{}'.format(nc.realization, nc.initialization_method,
                                  nc.physics_version)
    meta = {
        'experiment': nc.experiment_id,
        'frequency': nc.frequency,
        'institute': nc.institute_id,
        'model': nc.model_id,
        'modeling_realm': nc.modeling_realm,
        'ensemble_member': ensemble,
    }
    # Only include the variable name when one could be determined.
    variable_name = get_var_name(nc)
    if variable_name:
        meta['variable_name'] = variable_name
    return meta
|
def _get_firmware_update_xml_for_file_and_component ( self , filename , component ) :
"""Creates the dynamic xml for flashing the device firmware via iLO .
This method creates the dynamic xml for flashing the firmware , based
on the component type so passed .
: param filename : location of the raw firmware file .
: param component _ type : Type of component to be applied to .
: returns : the etree . Element for the root of the RIBCL XML
for flashing the device ( component ) firmware ."""
|
if component == 'ilo' :
cmd_name = 'UPDATE_RIB_FIRMWARE'
else : # Note ( deray ) : Not explicitly checking for all other supported
# devices ( components ) , as those checks have already happened
# in the invoking methods and may seem redundant here .
cmd_name = 'UPDATE_FIRMWARE'
fwlen = os . path . getsize ( filename )
root = self . _create_dynamic_xml ( cmd_name , 'RIB_INFO' , 'write' , subelements = { 'IMAGE_LOCATION' : filename , 'IMAGE_LENGTH' : str ( fwlen ) } )
return root
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.