Dataset columns: signature (string, lengths 29–44.1k) | implementation (string, lengths 0–85.2k)
def swaplevel(self, i=-2, j=-1, copy=True):
    """Swap levels i and j in a MultiIndex.

    Parameters
    ----------
    i, j : int, str (can be mixed)
        Level of index to be swapped. Can pass level name as string.
    copy : bool, default True
        Whether to copy the underlying data into the new Series.

    Returns
    -------
    Series
        Series with levels swapped in MultiIndex.

    .. versionchanged:: 0.18.1
       The indexes ``i`` and ``j`` are now optional, and default to
       the two innermost levels of the index.
    """
    swapped_index = self.index.swaplevel(i, j)
    result = self._constructor(self._values, index=swapped_index, copy=copy)
    return result.__finalize__(self)
|
def count_lines_to_next_cell(cell_end_marker, next_cell_start, total, explicit_eoc):
    """Return the number of blank lines between the end-of-cell marker and the next cell.

    :param cell_end_marker: line index of the current cell's end marker
    :param next_cell_start: line index where the next cell starts
    :param total: total number of lines in the document
    :param explicit_eoc: whether the cell carries an explicit end-of-cell marker
    :return: the blank-line count (1 when the marker is at or past the end)
    """
    # Marker at or beyond the end of the document: a single separating line.
    if cell_end_marker >= total:
        return 1
    gap = next_cell_start - cell_end_marker
    # An explicit end-of-cell marker occupies one of the lines in the gap.
    if explicit_eoc:
        gap -= 1
    # The last cell of the document gets one extra separating line.
    if next_cell_start >= total:
        gap += 1
    return gap
|
def maps_re_apply_policy_input_rbridge_id(self, **kwargs):
    """Auto Generated Code.

    Build the XML payload for the ``maps_re_apply_policy`` RPC, populate its
    ``input/rbridge-id`` element, and pass the result to the callback.

    :param kwargs: must contain ``rbridge_id`` (element text); may contain
        ``callback`` (defaults to ``self._callback``).
    :return: whatever the callback returns for the assembled config element.
    """
    # The original created an unused ET.Element("config") that was
    # immediately overwritten; that dead assignment is removed here.
    maps_re_apply_policy = ET.Element("maps_re_apply_policy")
    config = maps_re_apply_policy
    # renamed from 'input', which shadows the builtin of the same name
    input_el = ET.SubElement(maps_re_apply_policy, "input")
    rbridge_id = ET.SubElement(input_el, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def guess_encoding(self):
    """Guess encoding using the language, falling back on chardet.

    :return: the guessed encoding.
    :rtype: str
    """
    logger.info('Guessing encoding for language %s', self.language)
    # language-specific candidate encodings, tried after utf-8
    per_language = {
        'zho': ['gb18030', 'big5'],
        'jpn': ['shift-jis'],
        'ara': ['windows-1256'],
        'heb': ['windows-1255'],
        'tur': ['iso-8859-9', 'windows-1254'],
        'pol': ['windows-1250'],  # Eastern European Group 1
        'bul': ['windows-1251'],  # Eastern European Group 2
    }
    # always try utf-8 first; Western European languages fall back to latin-1
    encodings = ['utf-8'] + per_language.get(self.language.alpha3, ['latin-1'])
    # try to decode with each candidate until one succeeds
    logger.debug('Trying encodings %r', encodings)
    for encoding in encodings:
        try:
            self.content.decode(encoding)
        except UnicodeDecodeError:
            continue
        logger.info('Guessed encoding %s', encoding)
        return encoding
    logger.warning('Could not guess encoding from language')
    # fallback on chardet
    encoding = chardet.detect(self.content)['encoding']
    logger.info('Chardet found encoding %s', encoding)
    return encoding
|
def GET_AUTH(self, courseid):  # pylint: disable=arguments-differ
    """GET request: render the page for the given course (staff-only access)."""
    course = self.get_course_and_check_rights(courseid, allow_all_staff=False)[0]
    return self.show_page(course, web.input())
|
def _get_python_cmd(self):
    """Return the python executable in the virtualenv.

    Try first sys.executable but use fallbacks.
    """
    candidates = ["pypy.exe", "python.exe", "python"]
    if sys.executable is not None:
        # prefer the basename of the interpreter currently running
        candidates.insert(0, os.path.basename(sys.executable))
    return self._get_bin_file(*candidates)
|
def get_valid_build_systems(working_dir, package=None):
    """Returns the build system classes that could build the source in given dir.

    Args:
        working_dir (str): Dir containing the package definition and potentially
            build files.
        package (`Package`): Package to be built. This may or may not be needed
            to determine the build system. For eg, cmake just has to look for
            a CMakeLists.txt file, whereas the 'build_command' package field
            must be present for the 'custom' build system type.

    Returns:
        List of class: Valid build system class types.
    """
    from rez.plugin_managers import plugin_manager
    from rez.exceptions import PackageMetadataError

    try:
        package = package or get_developer_package(working_dir)
    except PackageMetadataError:
        # no package, or bad package
        pass

    if package:
        if getattr(package, "build_command", None) is not None:
            explicit_name = "custom"
        else:
            explicit_name = getattr(package, "build_system", None)
        # package explicitly specifies build system
        if explicit_name:
            return [plugin_manager.get_plugin_class('build_system', explicit_name)]

    # detect valid build systems
    candidates = [
        plugin_manager.get_plugin_class('build_system', name)
        for name in get_buildsys_types()
    ]
    candidates = [cls for cls in candidates
                  if cls.is_valid_root(working_dir, package=package)]

    # Sometimes files for multiple build systems can be present, because one
    # build system uses another (a 'child' build system) - eg, cmake uses
    # make. Detect this case and ignore files from the child build system.
    children = set(cls.child_build_system() for cls in candidates)
    return list(set(candidates) - children)
|
def basicConfig(level=logging.WARNING, transient_level=logging.NOTSET):
    """Shortcut for setting up transient logging.

    I am a replica of ``logging.basicConfig`` which installs a
    transient logging handler to stderr.
    """
    handler = TransientStreamHandler(level=level)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s [%(levelname)s] [%(name)s:%(lineno)d] %(message)s"))
    logging.root.setLevel(transient_level)  # <--- IMPORTANT
    logging.root.addHandler(handler)
|
def validate_unwrap(self, value, session=None):
    '''Validates that the DBRef is valid as well as can be done without
    retrieving it.'''
    if not isinstance(value, DBRef):
        self._fail_validation_type(value, DBRef)
    # collection check (only when a referenced type is configured)
    if self.type:
        expected = self.type.type.get_collection_name()
        if value.collection != expected:
            self._fail_validation(
                value,
                'Wrong collection for reference: got "%s" instead of "%s" '
                % (value.collection, expected))
    # database checks
    if self.db_required and not value.database:
        self._fail_validation(value, 'db_required=True, but not database specified')
    if self.db and value.database and self.db != value.database:
        self._fail_validation(
            value,
            'Wrong database for reference:  got "%s" instead of "%s" '
            % (value.database, self.db))
|
def _set_tlv_type(self, v, load=False):
    """Setter method for tlv_type, mapped from YANG variable
    /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mep/cfm_mep_sub_commands/tlv_type
    (ccm-tlv-type).

    If this variable is read-only (config: false) in the
    source YANG file, then _set_tlv_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_tlv_type() directly.
    """
    # 'load' is part of the generated setter signature; not used in this body.
    # Unwrap values that carry their underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce into the generated restricted type; only the key
        # 'port-status-tlv' is accepted by the restriction below.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-status-tlv': {'value': 1}},), is_leaf=True, yang_name="tlv-type", rest_name="tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'set TLV', u'cli-full-no': None, u'callpoint': u'setDot1agTlvType'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='ccm-tlv-type', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the structured payload the CLI/REST layer expects.
        raise ValueError({'error-string': """tlv_type must be of a type compatible with ccm-tlv-type""", 'defined-type': "brocade-dot1ag:ccm-tlv-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'port-status-tlv': {'value': 1}},), is_leaf=True, yang_name="tlv-type", rest_name="tlv-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'set TLV', u'cli-full-no': None, u'callpoint': u'setDot1agTlvType'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='ccm-tlv-type', is_config=True)""", })
    self.__tlv_type = t
    # Notify the containing object when it supports change tracking.
    if hasattr(self, '_set'):
        self._set()
|
def get_parent_log_nodes(self):
    """Gets the parents of this log.

    return: (osid.logging.LogNodeList) - the parents of this log
    *compliance: mandatory -- This method must be implemented.*
    """
    return LogNodeList([
        LogNode(node._my_map,
                runtime=self._runtime,
                proxy=self._proxy,
                lookup_session=self._lookup_session)
        for node in self._my_map['parentNodes']
    ])
|
def add_lifecycle_delete_rule(self, **kw):
    """Add a "delete" rule to the lifecycle rules configured for this bucket.

    See https://cloud.google.com/storage/docs/lifecycle and
    https://cloud.google.com/storage/docs/json_api/v1/buckets

    .. literalinclude:: snippets.py
       :start-after: [START add_lifecycle_delete_rule]
       :end-before: [END add_lifecycle_delete_rule]

    :type kw: dict
    :params kw: arguments passed to :class:`LifecycleRuleConditions`.
    """
    # re-assign the full list so the property setter sees the change
    self.lifecycle_rules = list(self.lifecycle_rules) + [LifecycleRuleDelete(**kw)]
|
def pkg(pkg_path, pkg_sum, hash_type, test=None, **kwargs):
    '''Execute a packaged state run, the packaged state run will exist in a
    tarball available locally. This packaged state
    can be generated using salt-ssh.

    CLI Example:

    .. code-block:: bash

        salt '*' state.pkg /tmp/salt_state.tgz 760a9353810e36f6d81416366fc426dc md5
    '''
    # TODO - Add ability to download from salt master or other source
    popts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    if not os.path.isfile(pkg_path):
        return {}
    # refuse to run a tarball whose checksum does not match
    if not salt.utils.hashutils.get_hash(pkg_path, hash_type) == pkg_sum:
        return {}
    root = tempfile.mkdtemp()
    s_pkg = tarfile.open(pkg_path, 'r:gz')
    # Verify that the tarball does not extract outside of the intended root
    members = s_pkg.getmembers()
    for member in members:
        if salt.utils.stringutils.to_unicode(member.path).startswith((os.sep, '..{0}'.format(os.sep))):
            return {}
        elif '..{0}'.format(os.sep) in salt.utils.stringutils.to_unicode(member.path):
            return {}
    s_pkg.extractall(root)
    s_pkg.close()
    lowstate_json = os.path.join(root, 'lowstate.json')
    with salt.utils.files.fopen(lowstate_json, 'r') as fp_:
        lowstate = salt.utils.json.load(fp_)
    # Check for errors in the lowstate
    for chunk in lowstate:
        if not isinstance(chunk, dict):
            return lowstate
    pillar_json = os.path.join(root, 'pillar.json')
    if os.path.isfile(pillar_json):
        with salt.utils.files.fopen(pillar_json, 'r') as fp_:
            pillar_override = salt.utils.json.load(fp_)
    else:
        pillar_override = None
    roster_grains_json = os.path.join(root, 'roster_grains.json')
    # Fix: the original tested os.path.isfile(roster_grains_json) twice in a
    # row; the grains assignment is folded into the single existence check.
    if os.path.isfile(roster_grains_json):
        with salt.utils.files.fopen(roster_grains_json, 'r') as fp_:
            roster_grains = salt.utils.json.load(fp_)
        popts['grains'] = roster_grains
    popts['fileclient'] = 'local'
    popts['file_roots'] = {}
    popts['test'] = _get_test_value(test, **kwargs)
    # each top-level directory in the extracted tree is a file_roots env
    for fn_ in os.listdir(root):
        full = os.path.join(root, fn_)
        if not os.path.isdir(full):
            continue
        popts['file_roots'][fn_] = [full]
    st_ = salt.state.State(popts, pillar_override=pillar_override)
    snapper_pre = _snapper_pre(popts, kwargs.get('__pub_jid', 'called localy'))
    ret = st_.call_chunks(lowstate)
    ret = st_.call_listen(lowstate, ret)
    try:
        shutil.rmtree(root)
    except (IOError, OSError):
        pass
    _set_retcode(ret)
    _snapper_post(popts, kwargs.get('__pub_jid', 'called localy'), snapper_pre)
    return ret
|
def generate_request_access_signature(parameters, secret_key):
    """Generate the parameter signature used during third party access requests.

    The parameter keys are sorted alphanumerically, each key/value pair is
    URL-encoded, the pairs are joined with '&', the result is prefixed with
    '<secret_key>:', and the SHA-256 hex digest of that string is returned.

    :param parameters: mapping of request parameters
    :param secret_key: shared secret prepended before hashing
    :return: hex digest string
    """
    # sorted() works on any mapping in both Python 2 and 3; the original
    # dict.keys().sort() fails on Python 3 because keys() returns a view
    keys = sorted(parameters)
    # create an array of url encoded key:value pairs
    encoded_pairs = [urlencode({key: parameters[key]}) for key in keys]
    # create the serialized parameters in a single, URL style string
    serialized_parameters = '&'.join(encoded_pairs)
    # create the string with the secret key and the parameters which will be hashed
    string_to_hash = '%s:%s' % (secret_key, serialized_parameters)
    # sha256 requires bytes; encode for Python 3 compatibility
    return sha256(string_to_hash.encode('utf-8')).hexdigest()
|
def stylize(obj, style='plastique', theme='projexui'):
    """Styles the inputed object with the given options.

    :param obj | <QtGui.QWidget> || <QtGui.QApplication>
           style | <str>
           theme | <str>
    """
    obj.setStyle(style)
    if not theme:
        return
    sheet = resources.read('styles/{0}/style.css'.format(theme))
    if sheet:
        obj.setStyleSheet(sheet)
|
def do_counter_conversion(self):
    """Update latest value to the diff between it and the previous value."""
    if not self.is_counter:
        return
    # first reading: diff against itself (i.e. zero)
    previous = self._previous_counter_value
    if previous is None:
        previous = self.latest_value
    self._previous_counter_value = self.latest_value
    self.latest_value = self.latest_value - previous
|
def collections(self, values):
    """Set list of collections."""
    # when a cache server is configured, save the collection list there
    if self.cache:
        cache_key = self.app.config['COLLECTIONS_CACHE_KEY']
        self.cache.set(cache_key, values)
|
def register_inline_handler(self, callback, *custom_filters, state=None, run_task=None, **kwargs):
    """Register handler for inline query.

    Example:

    .. code-block:: python3

        dp.register_inline_handler(some_inline_handler, lambda inline_query: True)

    :param callback:
    :param custom_filters: list of custom filters
    :param state:
    :param run_task: run callback in task (no wait results)
    :param kwargs:
    :return: decorated function
    """
    # The original guarded against custom_filters being None, but a *args
    # parameter is always a tuple, so that branch was unreachable and is removed.
    filters_set = self.filters_factory.resolve(self.inline_query_handlers,
                                               *custom_filters,
                                               state=state,
                                               **kwargs)
    self.inline_query_handlers.register(self._wrap_async_task(callback, run_task), filters_set)
|
def _list_records(self, rtype=None, name=None, content=None):
    """Connects to Hetzner account and returns a list of records filtered by record
    rtype, name and content. The list is empty if no records found.
    """
    with self._session(self.domain, self.domain_id) as ddata:
        fqdn = self._fqdn_name(name) if name else None
        zone_data = ddata['zone']['data']
        return self._list_records_in_zone(zone_data, rtype, fqdn, content)
|
def upload():
    """Uploads an artifact referenced by a run.

    Expects exactly one file in the request; stores it as an artifact bound to
    the current build and returns a JSON payload describing it.
    """
    build = g.build
    utils.jsonify_assert(len(request.files) == 1, 'Need exactly one uploaded file')
    # request.files.values() is a non-subscriptable view on Python 3; take the
    # first (and only) item from its iterator instead of indexing with [0]
    file_storage = next(iter(request.files.values()))
    data = file_storage.read()
    content_type, _ = mimetypes.guess_type(file_storage.filename)
    artifact = _save_artifact(build, data, content_type)
    db.session.add(artifact)
    db.session.commit()
    return flask.jsonify(
        success=True,
        build_id=build.id,
        sha1sum=artifact.id,
        content_type=content_type)
|
def late():
    """Used by functions in package.py that are evaluated lazily.

    The term 'late' refers to the fact these package attributes are evaluated
    late, ie when the attribute is queried for the first time.

    If you want to implement a package.py attribute as a function, you MUST use
    this decorator - otherwise it is understood that you want your attribute to
    be a function, not the return value of that function.
    """
    from rez.package_resources_ import package_rex_keys

    def decorated(fn):
        # This check is done here rather than in standard schema validation
        # because the latter causes a very obfuscated error message.
        if fn.__name__ in package_rex_keys:
            raise ValueError("Cannot use @late decorator on function '%s'"
                             % fn.__name__)
        fn._late = True
        _add_decorator(fn, "late")
        return fn

    return decorated
|
def _rise_set_trig(t, target, location, prev_next, rise_set):
    """Crude time at next rise/set of ``target`` using spherical trig.

    This method is ~15 times faster than `_calcriseset`,
    and inherently does *not* take the atmosphere into account.
    The time returned should not be used in calculations; the purpose
    of this routine is to supply a guess to `_calcriseset`.

    Parameters
    ----------
    t : `~astropy.time.Time` or other (see below)
        Time of observation. This will be passed in as the first argument to
        the `~astropy.time.Time` initializer, so it can be anything that
        `~astropy.time.Time` will accept (including a `~astropy.time.Time`
        object).
    target : `~astropy.coordinates.SkyCoord`
        Position of target or multiple positions of that target
        at multiple times (if target moves, like the Sun).
    prev_next : str - either 'previous' or 'next'
        Test next rise/set or previous rise/set.
    rise_set : str - either 'rising' or 'setting'
        Compute prev/next rise or prev/next set.

    Returns
    -------
    ret1 : `~astropy.time.Time`
        Time of rise/set.
    """
    dec = target.transform_to(coord.ICRS).dec
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        # ignore astropy deprecation warnings raised by this attribute access
        lat = location.latitude
    # horizon-crossing condition: cos(HA) = -tan(dec) * tan(lat)
    cosHA = -np.tan(dec) * np.tan(lat.radian)
    # find the absolute value of the hour Angle
    HA = coord.Longitude(np.fabs(np.arccos(cosHA)))
    # if rise, HA is -ve and vice versa
    if rise_set == 'rising':
        HA = -HA
    # LST = HA + RA
    LST = HA + target.ra
    return _astropy_time_from_LST(t, LST, location, prev_next)
|
async def update(self):
    """Update sirbot.

    Trigger the update method of the plugins. This is needed if the plugins
    need to perform update migration (i.e database).
    """
    logger.info('Updating Sir Bot-a-lot')
    for name, plugin in self._plugins.items():
        updater = getattr(plugin['plugin'], 'update', None)
        if not callable(updater):
            continue
        logger.info('Updating %s', name)
        await updater(self.config.get(name, {}), self._plugins)
        logger.info('%s updated', name)
    self._session.close()
    logger.info('Sir Bot-a-lot updated')
|
def get_model_voice(self, app, model_item):
    """Model voice.

    Returns the js menu compatible voice dict if the user
    can see it, None otherwise.
    """
    model_name = model_item.get('name', None)
    if model_name is None:
        raise ImproperlyConfigured('Model menu voices must have a name key')
    if not self.check_model_permission(app, model_name):
        return None
    return {
        'type': 'model',
        'label': model_item.get('label', ''),
        'icon': model_item.get('icon', None),
        'url': self.apps_dict[app]['models'][model_name]['admin_url'],  # noqa
    }
|
def _load_data(self):
    """Load all fixtures from :attr:`fixture_dirs`.

    First pass: read every YAML fixture file into the cache and collect the
    identifier keys each one defines. Second pass: render each template with
    knowledge of all identifiers (lets random_model / random_models resolve).
    """
    filenames = []
    model_identifiers = defaultdict(list)
    # attempt to load fixture files from given directories (first pass)
    # for each valid model fixture file, read it into the cache and get the
    # list of identifier keys from it
    for fixtures_dir in self.fixture_dirs:
        for filename in os.listdir(fixtures_dir):
            path = os.path.join(fixtures_dir, filename)
            # NOTE(review): find('.') splits on the FIRST dot, so a name like
            # 'a.b.yml' yields ext 'b.yml' and the file is skipped — confirm intended
            file_ext = filename[filename.find('.') + 1:]
            # make sure it's a valid fixture file
            if os.path.isfile(path) and file_ext in {'yml', 'yaml'}:
                filenames.append(filename)
                with open(path) as f:
                    self._cache[filename] = f.read()
                # preload to determine identifier keys
                with self._preloading_env() as env:
                    rendered_yaml = env.get_template(filename).render()
                # NOTE(review): yaml.load without an explicit Loader executes
                # arbitrary tags — unsafe on untrusted fixtures and deprecated
                # in PyYAML >= 5.1; consider yaml.safe_load
                data = yaml.load(rendered_yaml)
                if data:
                    # class name = filename up to the LAST dot
                    class_name = filename[:filename.rfind('.')]
                    model_identifiers[class_name] = list(data.keys())
    # second pass where we can render the jinja templates with knowledge of all
    # the model identifier keys (allows random_model and random_models to work)
    for filename in filenames:
        self._load_from_yaml(filename, model_identifiers)
    self._loaded = True
|
def setIcon(self, icon):
    """Sets the icon for this hotspot. If this method is called with a valid
    icon, then the style will automatically switch to Icon, otherwise,
    the style will be set to Invisible.

    :param icon | <QIcon> || <str> || None
    """
    qicon = QIcon(icon)
    if qicon.isNull():
        self._icon = None
        self._style = XNodeHotspot.Style.Invisible
    else:
        self._icon = qicon
        self._style = XNodeHotspot.Style.Icon
|
def profile_get(user, default_hidden=True):
    '''List profiles for user

    user : string
        username
    default_hidden : boolean
        hide default profiles

    CLI Example:

    .. code-block:: bash

        salt '*' rbac.profile_get leo
        salt '*' rbac.profile_get leo default_hidden=False
    '''
    user_profiles = []
    ## read user_attr file (user:qualifier:res1:res2:attr)
    with salt.utils.files.fopen('/etc/user_attr', 'r') as user_attr:
        for profile in user_attr:
            profile = salt.utils.stringutils.to_unicode(profile)
            profile = profile.strip().split(':')
            # skip comments and non compliant lines
            if len(profile) != 5:
                continue
            # skip other users
            if profile[0] != user:
                continue
            # parse attr
            attrs = {}
            for attr in profile[4].strip().split(';'):
                attr = attr.strip()
                # Fix: skip empty chunks (trailing/duplicate ';') and
                # malformed entries without '=', which previously raised
                # ValueError on tuple unpacking.
                if not attr or '=' not in attr:
                    continue
                # Fix: split only on the first '=' so attribute values that
                # themselves contain '=' are preserved intact.
                attr_key, attr_val = attr.split('=', 1)
                if attr_key in ['auths', 'profiles', 'roles']:
                    attrs[attr_key] = attr_val.strip().split(',')
                else:
                    attrs[attr_key] = attr_val
            if 'profiles' in attrs:
                user_profiles.extend(attrs['profiles'])
    ## remove default profiles
    if default_hidden:
        for profile in profile_list(default_only=True):
            if profile in user_profiles:
                user_profiles.remove(profile)
    return list(set(user_profiles))
|
def copy_entry_to_entry(self, fromentry, destentry, check_for_dupes=True, compare_to_existing=True):
    """Used by `merge_duplicates`.

    Copy every sourced item (photometry, spectra, errors, models and generic
    quantities) from ``fromentry`` into ``destentry``, re-adding sources and
    models so their aliases are remapped to the destination entry.

    NOTE(review): the ``check_for_dupes`` parameter is not forwarded —
    add_quantity is always called with check_for_dupes=False; confirm intended.
    """
    self.log.info("Copy entry object '{}' to '{}'".format(fromentry[fromentry._KEYS.NAME], destentry[destentry._KEYS.NAME]))
    # Map of source alias -> source dict (alias removed) from the source entry.
    newsourcealiases = {}
    if self.proto._KEYS.SOURCES in fromentry:
        for source in fromentry[self.proto._KEYS.SOURCES]:
            alias = source.pop(SOURCE.ALIAS)
            newsourcealiases[alias] = source
    # Same mapping for models.
    newmodelaliases = {}
    if self.proto._KEYS.MODELS in fromentry:
        for model in fromentry[self.proto._KEYS.MODELS]:
            alias = model.pop(MODEL.ALIAS)
            newmodelaliases[alias] = model
    # Errors are appended wholesale onto the destination entry.
    if self.proto._KEYS.ERRORS in fromentry:
        for err in fromentry[self.proto._KEYS.ERRORS]:
            destentry.setdefault(self.proto._KEYS.ERRORS, []).append(err)
    for rkey in fromentry:
        key = fromentry._KEYS.get_key_by_name(rkey)
        # Keys without sources were already handled above; skip them here.
        if key.no_source:
            continue
        for item in fromentry[key]:  # isd = False
            if 'source' not in item:
                raise ValueError("Item has no source!")
            # Re-add each referenced source to the destination, then rewrite
            # the item's comma-delimited source field with the new aliases.
            nsid = []
            for sid in item['source'].split(','):
                if sid in newsourcealiases:
                    source = newsourcealiases[sid]
                    nsid.append(destentry.add_source(**source))
                else:
                    raise ValueError("Couldn't find source alias!")
            item['source'] = uniq_cdl(nsid)
            # Same alias remapping for any referenced models.
            if 'model' in item:
                nmid = []
                for mid in item['model'].split(','):
                    if mid in newmodelaliases:
                        model = newmodelaliases[mid]
                        nmid.append(destentry.add_model(**model))
                    else:
                        raise ValueError("Couldn't find model alias!")
                item['model'] = uniq_cdl(nmid)
            # Dispatch on the key type to the matching add_* method.
            if key == ENTRY.PHOTOMETRY:
                destentry.add_photometry(compare_to_existing=compare_to_existing, **item)
            elif key == ENTRY.SPECTRA:
                destentry.add_spectrum(compare_to_existing=compare_to_existing, **item)
            elif key == ENTRY.ERRORS:
                destentry.add_error(**item)
            elif key == ENTRY.MODELS:
                # Models were already re-added via add_model above.
                continue
            else:
                destentry.add_quantity(compare_to_existing=compare_to_existing, check_for_dupes=False, quantities=key, **item)
    return
|
def move_round_to(self, points, steps):
    """Follow a path pre-defined by a set of at least 4 points. This Path will
    interpolate the points into a curve and follow that curve.

    :param points: The list of points that defines the path.
    :param steps: The number of steps to take to follow the path.
    """
    # Spline interpolation needs a before and after point for the curve.
    # Duplicate the first and last points to handle this. We also need
    # to move from the current position to the first specified point.
    # Fix: pad a local copy — the original inserted/appended into the
    # caller's list, so it grew on every call.
    path = [(self._rec_x, self._rec_y), (self._rec_x, self._rec_y)]
    path.extend(points)
    path.append(points[-1])
    # Convert the points into an interpolated set of more detailed points.
    steps_per_spline = steps // (len(path) - 3)
    for j in range(1, len(path) - 2):
        for t in range(1, steps_per_spline + 1):
            y = _spline(float(t) / steps_per_spline,
                        float(path[j - 1][1]),
                        float(path[j][1]),
                        float(path[j + 1][1]),
                        float(path[j + 2][1]))
            x = int(path[j][0] +
                    ((path[j + 1][0] - path[j][0]) * float(t) / steps_per_spline))
            self._add_step((x, int(y)))
|
def nearest_tile_to_node_using_tiles(tile_ids, node_coord):
    """Get the first tile found adjacent to the given node. Returns a tile identifier.

    :param tile_ids: tiles to look at for adjacency, list(Tile.tile_id)
    :param node_coord: node coordinate to find an adjacent tile to, int
    :return: tile identifier of an adjacent tile, Tile.tile_id; None when no
        tile in ``tile_ids`` touches the node
    """
    for tile_id in tile_ids:
        # membership test directly against the dict; no need for .keys()
        if node_coord - tile_id_to_coord(tile_id) in _tile_node_offsets:
            return tile_id
    # lazy %-style args defer formatting until the record is actually emitted
    logging.critical('Did not find a tile touching node=%s', node_coord)
    return None
|
def _DoSection(args, context, callback, trace):
    """{.section foo}

    Execute the section's statements when the named section is present and
    truthy; otherwise execute its {.or} clause.
    """
    block = args
    # If a section present and "true", push the dictionary onto the stack as the
    # new context, and show it
    if context.PushSection(block.section_name, block.pre_formatters):
        _Execute(block.Statements(), context, callback, trace)
        context.Pop()
    else:  # missing or "false" -- show the {.or} section
        # NOTE(review): Pop() runs in both branches — presumably PushSection
        # pushes unconditionally and only its return value differs; confirm
        # against the context implementation.
        context.Pop()
        _Execute(block.Statements('or'), context, callback, trace)
|
def _downcase_word(text, pos):
    """Lowercase the current (or following) word."""
    # find the end of the word starting at (or after) pos
    moved_text, word_end = _forward_word(text, pos)
    lowered = moved_text[:pos] + moved_text[pos:word_end].lower() + moved_text[word_end:]
    return lowered, word_end
|
def set_paths(etc_paths=("/etc/",)):
    """Sets the paths where the configuration files will be searched.

    * You can have multiple configuration files (e.g. in the /etc/default folder
      and in /etc/appfolder/)

    :param etc_paths: iterable of directories to search; '~' is expanded.
    """
    # Fix: the original used a mutable list as the default argument; a tuple
    # default removes the shared-mutable-default pitfall.
    global _ETC_PATHS
    _ETC_PATHS = [os.path.expanduser(p) for p in etc_paths]
|
def add_years(datetime_like_object, n, return_date=False):
    """Returns a time that n years after a time.

    :param datetime_like_object: a datetime object or a datetime str
    :param n: number of years, value can be negative
    :param return_date: returns a date object instead of datetime

    **中文文档**

    返回给定日期N年之后的时间。
    """
    a_datetime = parser.parse_datetime(datetime_like_object)
    # try to keep the same month/day in the target year
    try:
        a_datetime = datetime(
            a_datetime.year + n, a_datetime.month, a_datetime.day,
            a_datetime.hour, a_datetime.minute, a_datetime.second,
            a_datetime.microsecond, tzinfo=a_datetime.tzinfo,
        )
    except ValueError:
        # Must be xxxx-02-29: clamp to Feb 28 of the target year.
        # Fix: preserve tzinfo here too (the original dropped it in this branch).
        a_datetime = datetime(
            a_datetime.year + n, 2, 28,
            a_datetime.hour, a_datetime.minute, a_datetime.second,
            a_datetime.microsecond, tzinfo=a_datetime.tzinfo,
        )
    if return_date:  # pragma: no cover
        return a_datetime.date()
    else:
        return a_datetime
|
def availability_set_get(name, resource_group, **kwargs):
    '''.. versionadded:: 2019.2.0

    Get a dictionary representing an availability set's properties.

    :param name: The availability set to get.
    :param resource_group: The resource group name assigned to the
        availability set.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_compute.availability_set_get testset testgroup
    '''
    compconn = __utils__['azurearm.get_client']('compute', **kwargs)
    try:
        availability_set = compconn.availability_sets.get(
            resource_group_name=resource_group,
            availability_set_name=name)
        result = availability_set.as_dict()
    except CloudError as exc:
        # log the failure and surface the message in the return payload
        __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
        result = {'error': str(exc)}
    return result
|
def cleanup_failed_attacks(self):
    """Cleans up data of failed attacks."""
    print_header('Cleaning up failed attacks')
    self.attack_work.read_all_from_datastore()
    attacks_to_replace = {}
    failed_submissions = set()
    error_msg = set()
    # collect every failed work item and build its reset replacement
    for work_id, work_val in iteritems(self.attack_work.work):
        if work_val['error'] is None:
            continue
        failed_submissions.add(work_val['submission_id'])
        error_msg.add(work_val['error'])
        replacement = dict(work_val)
        replacement.update({
            'claimed_worker_id': None,
            'claimed_worker_start_time': None,
            'is_completed': False,
            'error': None,
            'elapsed_time': None,
        })
        attacks_to_replace[work_id] = replacement
    self.attack_work.replace_work(attacks_to_replace)
    print('Affected submissions:')
    print(' '.join(sorted(failed_submissions)))
    print('Error messages:')
    print(' '.join(sorted(error_msg)))
    print('')
    inp = input_str('Are you sure? (type "yes" without quotes to confirm): ')
    if inp != 'yes':
        return
    self.attack_work.write_all_to_datastore()
    print('Work cleaned up')
|
def parse(self, text):
    """The parser entry point.

    Parse the provided text to check for its validity.

    On success, the parsing tree is available into the result
    attribute. It is a list of sievecommands.Command objects (see
    the module documentation for specific information).

    On error, a string containing the explicit reason is
    available into the error attribute.

    :param text: a string containing the data to parse
    :return: True on success (no error detected), False otherwise
    """
    # the lexer works on bytes; normalize str input first
    if isinstance(text, text_type):
        text = text.encode("utf-8")
    self.__reset_parser()
    try:
        for ttype, tvalue in self.lexer.scan(text):
            # comments are collected (hash) or skipped (bracket), not parsed
            if ttype == "hash_comment":
                self.hash_comments += [tvalue.strip()]
                continue
            if ttype == "bracket_comment":
                continue
            # enforce the token type(s) the previous token made mandatory
            if self.__expected is not None:
                if ttype not in self.__expected:
                    if self.lexer.pos < len(text):
                        msg = ("%s found while %s expected near '%s'" % (ttype, "|".join(self.__expected), text[self.lexer.pos]))
                    else:
                        msg = ("%s found while %s expected at end of file" % (ttype, "|".join(self.__expected)))
                    raise ParseError(msg)
                self.__expected = None
            # feed the token to the command state machine
            if not self.__command(ttype, tvalue):
                msg = ("unexpected token '%s' found near '%s'" % (tvalue.decode(), text.decode()[self.lexer.pos]))
                raise ParseError(msg)
        # an unclosed block means a closing '}' is still required
        if self.__opened_blocks:
            self.__set_expected("right_cbracket")
        if self.__expected is not None:
            raise ParseError("end of script reached while %s expected" % "|".join(self.__expected))
    except (ParseError, CommandError) as e:
        # record the failure reason with the line it occurred on
        self.error = "line %d: %s" % (self.lexer.curlineno(), str(e))
        return False
    return True
|
def mi(mi, iq=None, pl=None):  # pylint: disable=redefined-outer-name
    """Wrapper for :meth:`~pywbem.WBEMConnection.ModifyInstance`.

    Modify the property values of an instance.

    Parameters:

      mi (:class:`~pywbem.CIMInstance`):
        Modified instance, also indicating its instance path.
        The properties defined in this object specify the new property
        values for the instance to be modified. Missing properties
        (relative to the class declaration) and properties provided with
        a value of `None` will be set to NULL.

      iq (:class:`py:bool`):
        IncludeQualifiers flag: Modify instance qualifiers as specified in
        the instance. `None` will cause the server default of `True` to be
        used. Deprecated in :term:`DSP0200`: Clients cannot rely on
        qualifiers to be modified by this operation.

      pl (:term:`string` or :term:`py:iterable` of :term:`string`):
        PropertyList: Names of properties to be modified. An empty iterable
        indicates to modify no properties. If `None`, all properties
        exposed by the instance will be modified.
    """
    # Delegates to the module-level connection object.
    CONN.ModifyInstance(mi, IncludeQualifiers=iq, PropertyList=pl)
|
def rpush(self, name, *values):
    """Push the value into the list from the *right* side.

    :param name: str, the name of the redis key
    :param values: a list of values or single value to push
    :return: Future()
    """
    with self.pipe as conn:
        encode = self.valueparse.encode
        encoded = [encode(item) for item in self._parse_values(values)]
        return conn.rpush(self.redis_key(name), *encoded)
|
def as_blocks(self, copy=True):
    """Convert the frame to a dict of dtype -> Constructor Types that each
    has a homogeneous dtype.

    .. deprecated:: 0.21.0

    NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
    as_matrix).

    Parameters
    ----------
    copy : boolean, default True

    Returns
    -------
    values : a dict of dtype -> Constructor Types
    """
    # Emit the deprecation warning pointing at the caller's frame.
    warnings.warn(
        "as_blocks is deprecated and will be removed in a future version",
        FutureWarning,
        stacklevel=2,
    )
    return self._to_dict_of_blocks(copy=copy)
|
def increment_cell_value(self, column_family_id, column, int_value):
    """Increments a value in an existing cell.

    Assumes the value in the cell is stored as a 64 bit integer
    serialized to bytes.

    .. note::
        This method adds a read-modify rule protobuf to the accumulated
        read-modify rules on this row, but does not make an API request.
        To actually send an API request (with the rules) to the Google
        Cloud Bigtable API, call :meth:`commit`.

    :type column_family_id: str
    :param column_family_id: The column family that contains the column.
                             Must be of the form
                             ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
    :type column: bytes
    :param column: The column within the column family where the cell
                   is located.
    :type int_value: int
    :param int_value: The value to increment the existing value in the cell
                      by. If the targeted cell is unset, it will be treated
                      as containing a zero. Otherwise, the targeted cell
                      must contain an 8-byte value (interpreted as a 64-bit
                      big-endian signed integer), or the entire request
                      will fail.
    """
    # Build the protobuf rule and queue it; nothing is sent until commit().
    rule = data_v2_pb2.ReadModifyWriteRule(
        family_name=column_family_id,
        column_qualifier=_to_bytes(column),
        increment_amount=int_value,
    )
    self._rule_pb_list.append(rule)
|
def get_suitable_slot_for_duplicate(self, src_slot):
    """Returns the suitable position for a duplicate analysis, taking into
    account if there is a WorksheetTemplate assigned to this worksheet.

    By default, returns a new slot at the end of the worksheet unless there
    is a slot defined for a duplicate of the src_slot in the worksheet
    template layout not yet used.

    :param src_slot: slot number the duplicate originates from
    :return: suitable slot position for a duplicate of src_slot, or -1 when
        ``src_slot`` is invalid or does not hold a routine analysis
    """
    slot_from = to_int(src_slot, 0)
    if slot_from < 1:
        # Slot positions are 1-based; anything below 1 is invalid input.
        return -1
    # Are the analyses from src_slot suitable for duplicates creation?
    container = self.get_container_at(slot_from)
    if not container or not IAnalysisRequest.providedBy(container):
        # We cannot create duplicates from analyses other than routine ones,
        # those that belong to an Analysis Request.
        return -1
    occupied = self.get_slot_positions(type='all')
    wst = self.getWorksheetTemplate()
    if not wst:
        # No worksheet template assigned, add a new slot at the end of
        # the worksheet with the duplicate there
        slot_to = max(occupied) + 1
        return slot_to
    # If there is a match with the layout defined in the Worksheet
    # Template, use that slot instead of adding a new one at the end of
    # the worksheet
    layout = wst.getLayout()
    for pos in layout:
        # Only duplicate ('d') entries that reference src_slot qualify.
        if pos['type'] != 'd' or to_int(pos['dup']) != slot_from:
            continue
        slot_to = int(pos['pos'])
        if slot_to in occupied:
            # Not an empty slot
            continue
        # This slot is empty, use it instead of adding a new
        # slot at the end of the worksheet
        return slot_to
    # Add a new slot at the end of the worksheet, but take into account
    # that a worksheet template is assigned, so we need to take care to
    # not override slots defined by its layout
    occupied.append(len(layout))
    slot_to = max(occupied) + 1
    return slot_to
|
async def unpinChatMessage(self, chat_id):
    """See: https://core.telegram.org/bots/api#unpinchatmessage"""
    # NOTE: _strip(locals()) harvests this coroutine's arguments by name,
    # so the parameter name ``chat_id`` is part of the wire request and
    # must not be renamed.
    p = _strip(locals())
    return await self._api_request('unpinChatMessage', _rectify(p))
|
def get(self, variable_path: str, default: t.Optional[t.Any] = None, coerce_type: t.Optional[t.Type] = None, coercer: t.Optional[t.Callable] = None, **kwargs):
    """Reads a value of ``variable_path`` from environment.

    If ``coerce_type`` is ``bool`` and no ``coercer`` specified,
    ``coercer`` is forced to be
    :func:`~django_docker_helpers.utils.coerce_str_to_bool`.

    :param variable_path: a delimiter-separated path to a nested value
    :param default: default value if there's no object by specified path
    :param coerce_type: cast a type of a value to a specified one
    :param coercer: perform a type casting with specified callback
    :param kwargs: additional arguments inherited parser may need
    :return: value or default
    """
    raw_value = self.env.get(self.get_env_var_name(variable_path), self.sentinel)
    if raw_value is self.sentinel:
        return default
    # Booleans get the dedicated string->bool coercer unless the caller
    # supplied an explicit one.
    if coerce_type is bool and not coercer:
        coercer = coerce_str_to_bool
    return self.coerce(raw_value, coerce_type=coerce_type, coercer=coercer)
|
def iflat_tasks_wti(self, status=None, op="==", nids=None):
    """Generator to iterate over all the tasks of the `Flow`.

    Yields:
        (task, work_index, task_index)

    If status is not None, only the tasks whose status satisfies
    the condition (task.status op status) are selected.
    status can be either one of the flags defined in the :class:`Task` class
    (e.g Task.S_OK) or a string e.g "S_OK".
    nids is an optional list of node identifiers used to filter the tasks.
    """
    # Thin wrapper: the private implementation does the filtering;
    # with_wti=True asks it to include (work_index, task_index) per item.
    return self._iflat_tasks_wti(status=status, op=op, nids=nids, with_wti=True)
|
def _recv_thread(self):
    """Internal thread to iterate over source messages and dispatch callbacks."""
    for msg, metadata in self._source:
        # Only dispatch messages carrying a truthy msg_type.
        if msg.msg_type:
            self._call(msg, **metadata)
    # Source exhausted: break any upstream iterators held by the sinks.
    for sink in self._sinks:
        # sink() may return None — presumably a dead weak reference;
        # TODO(review): confirm how sinks are stored.
        i = sink()
        if i is not None:
            i.breakiter()
    self._dead = True
|
def string_profiler(string, start_delimiter='(', end_delimiter=')', remove=True):
    """Split ``string`` into whitespace-separated tokens, treating spans
    between ``start_delimiter`` and ``end_delimiter`` specially.

    Delimited spans are dropped when ``remove`` is True (the default);
    otherwise each span's inner text (delimiters excluded, whitespace
    preserved) is kept as a single, unsplit token. An unterminated
    ``start_delimiter`` consumes the remainder of the string.

    :param string: text to tokenize
    :param start_delimiter: character opening a delimited span
    :param end_delimiter: character closing a delimited span
    :param remove: drop delimited spans instead of keeping them whole
    :return: list of tokens

    >>> string_profiler(' (life love) yes (and much more) ')
    ['yes']
    >>> string_profiler(' yes "life love" ', '"', '"', remove=False)
    ['yes', 'life love']
    """
    # Rewritten from a manual two-counter scan: the redundant
    # flag=True/False branch and the `mark` offset bookkeeping are
    # replaced by a single index and str.find for the closing delimiter.
    tokens = []
    plain = ''  # accumulates text outside delimited spans
    idx = 0
    length = len(string)
    while idx < length:
        if string[idx] == start_delimiter:
            # Flush any pending plain text as individual tokens.
            if plain:
                tokens.extend(plain.strip().split())
                plain = ''
            end = string.find(end_delimiter, idx + 1)
            if end == -1:
                # Unterminated span: consume the rest of the string.
                inner, idx = string[idx + 1:], length
            else:
                inner, idx = string[idx + 1:end], end + 1
            if not remove:
                tokens.append(inner)
        else:
            plain += string[idx]
            idx += 1
    if plain:
        tokens.extend(plain.strip().split())
    return tokens
|
def readlink(link):
    """readlink(link) -> target

    Return a string representing the path to which the symbolic link
    points.

    :param link: path of the symbolic link to resolve
    :return: the link's substitute-name target
    :raises WindowsError: if the file cannot be opened
    :raises RuntimeError: if the path is a reparse point but not a symlink
    """
    handle = api.CreateFile(
        link,
        0,
        0,
        None,
        api.OPEN_EXISTING,
        api.FILE_FLAG_OPEN_REPARSE_POINT | api.FILE_FLAG_BACKUP_SEMANTICS,
        None,
    )
    if handle == api.INVALID_HANDLE_VALUE:
        raise WindowsError()
    try:
        # Fetch the raw reparse data for the opened link.
        res = reparse.DeviceIoControl(
            handle, api.FSCTL_GET_REPARSE_POINT, None, 10240)
    finally:
        # BUG FIX: the handle was previously closed only on the success
        # path, leaking it whenever DeviceIoControl or the tag check raised.
        handle_nonzero_success(api.CloseHandle(handle))
    # Reinterpret the returned bytes as a REPARSE_DATA_BUFFER structure.
    # (local renamed from `bytes`, which shadowed the builtin)
    buf = create_string_buffer(res)
    p_rdb = cast(buf, POINTER(api.REPARSE_DATA_BUFFER))
    rdb = p_rdb.contents
    if rdb.tag != api.IO_REPARSE_TAG_SYMLINK:
        raise RuntimeError("Expected IO_REPARSE_TAG_SYMLINK, but got %d" % rdb.tag)
    return rdb.get_substitute_name()
|
def step(self, data):
    """Run convolution over a single position. The data must be exactly as
    wide as the convolution filters.

    :param data: Shape: (batch_size, kernel_width, num_hidden).
    :return: Single result of a convolution. Shape: (batch_size, 1, num_hidden).
    """
    # As we only run convolution over a single window that is exactly the
    # size of the convolutional filter we can use FullyConnected instead of
    # Convolution for efficiency reasons. Additionally we do not need to
    # perform any masking.
    num_hidden = self._pre_activation_num_hidden()
    # (batch_size, num_hidden, kernel_width)
    data = mx.sym.swapaxes(data, dim1=1, dim2=2)
    # (batch_size, num_hidden * kernel_width)
    data = mx.sym.reshape(data, shape=(0, -3))
    # (preact_num_hidden, num_hidden * kernel_width)
    weight = mx.sym.reshape(self.conv_weight, shape=(0, -3))
    data_conv = mx.sym.FullyConnected(data=data, weight=weight,
                                      bias=self.conv_bias,
                                      num_hidden=num_hidden)
    # (batch_size, num_hidden, 1)
    data_conv = mx.sym.expand_dims(data_conv, axis=2)
    # _post_convolution applies the remaining transformation
    # (defined elsewhere in this class).
    return self._post_convolution(data_conv)
|
def nearest_neighbor(self, vectors, num=10, batch_size=100, show_progressbar=False, return_names=True):
    """Find the nearest neighbors to some arbitrary vector.

    This function is meant to be used in composition operations. The
    most_similar function can only handle items that are in vocab, and
    looks up their vector through a dictionary. Compositions, e.g.
    "King - man + woman" are necessarily not in the vocabulary.

    Parameters
    ----------
    vectors : list of arrays or numpy array
        The vectors to find the nearest neighbors to.
    num : int, optional, default 10
        The number of most similar items to retrieve.
    batch_size : int, optional, default 100
        The batch size to use. 100 is a good default option. Increasing
        the batch size may increase speed.
    show_progressbar : bool, optional, default False
        Whether to show a progressbar.
    return_names : bool, optional, default True
        Whether to return the item names, or just the distances.

    Returns
    -------
    sim : list of tuples
        For each item in the input the num most similar items are returned
        in the form of (NAME, DISTANCE) tuples. If return_names is set to
        false, only the distances are returned.
    """
    vectors = np.array(vectors)
    # Promote a single vector to a batch of one.
    if np.ndim(vectors) == 1:
        vectors = vectors[None, :]
    # Removed a dead `result = []` assignment that was immediately
    # overwritten in the original.
    # num + 1 neighbors are requested — presumably so a later step can
    # drop the query itself; TODO(review): confirm against _batch.
    result = self._batch(vectors, batch_size, num + 1, show_progressbar, return_names)
    return list(result)
|
def query_boost_version(boost_root):
    """Read in the Boost version from a given boost_root.

    Scans ``<boost_root>/Jamroot`` for a line of the form
    ``constant BOOST_VERSION : <version> ;`` and returns ``<version>``.

    :param boost_root: directory expected to contain a Jamroot file
    :return: the version string, or ``'default'`` when Jamroot is missing
        or contains no BOOST_VERSION declaration
    """
    # Join the path once; iterate the file lazily instead of readlines().
    jamroot = os.path.join(boost_root, 'Jamroot')
    if os.path.exists(jamroot):
        with codecs.open(jamroot, 'r', 'utf-8') as f:
            # Expected layout: constant BOOST_VERSION : 1.66.0 ;
            for line in f:
                parts = line.split()
                if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
                    return parts[3]
    return 'default'
|
def _detect(detector, st, threshold, trig_int, moveout=0, min_trig=0,
            process=True, extract_detections=False, cores=1, debug=0):
    """Detect within continuous data using the subspace method.

    Not to be called directly, use the detector.detect method.

    :type detector: eqcorrscan.core.subspace.Detector
    :param detector: Detector to use.
    :type st: obspy.core.stream.Stream
    :param st: Un-processed stream to detect within using the subspace
        detector.
    :type threshold: float
    :param threshold: Threshold value for detections between 0-1.
    :type trig_int: float
    :param trig_int: Minimum trigger interval in seconds.
    :type moveout: float
    :param moveout: Maximum allowable moveout window for non-multiplexed,
        network detection. See note.
    :type min_trig: int
    :param min_trig: Minimum number of stations exceeding threshold for
        non-multiplexed, network detection. See note.
    :type process: bool
    :param process: Whether or not to process the stream according to the
        parameters defined by the detector. Default is to process the
        data (True).
    :type extract_detections: bool
    :param extract_detections: Whether to extract waveforms for each
        detection or not, if true will return detections and streams.
    :type cores: int
    :param cores: Number of cores used for stream processing.
    :type debug: int
    :param debug: Debug output level from 0-5.

    :return: list of detections
    :rtype: list of eqcorrscan.core.match_filter.Detection
    """
    detections = []
    # First process the stream
    if process:
        debug_print('Processing Stream', 0, debug)
        stream, stachans = _subspace_process(
            streams=[st.copy()], lowcut=detector.lowcut,
            highcut=detector.highcut, filt_order=detector.filt_order,
            sampling_rate=detector.sampling_rate,
            multiplex=detector.multiplex, stachans=detector.stachans,
            parallel=True, align=False, shift_len=None, reject=False,
            cores=cores)
    else:
        # Check the sampling rate at the very least
        for tr in st:
            if not tr.stats.sampling_rate == detector.sampling_rate:
                raise ValueError('Sampling rates do not match.')
        stream = [st]
        stachans = detector.stachans
    # BUG FIX: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; time.perf_counter() is the documented replacement for interval
    # timing.
    outtic = time.perf_counter()
    # If multiplexed, how many samples do we increment by?
    if detector.multiplex:
        Nc = len(detector.stachans)
    else:
        Nc = 1
    # Here do all ffts
    fft_vars = _do_ffts(detector, stream, Nc)
    debug_print('Computing detection statistics', 0, debug)
    debug_print('Preallocating stats matrix', 0, debug)
    stats = np.zeros((len(stream[0]),
                      (len(stream[0][0]) // Nc) - (fft_vars[4] // Nc) + 1))
    for det_freq, data_freq_sq, data_freq, i in zip(
            fft_vars[0], fft_vars[1], fft_vars[2],
            np.arange(len(stream[0]))):
        # Calculate det_statistic in frequency domain
        stats[i] = _det_stat_freq(det_freq, data_freq_sq, data_freq,
                                  fft_vars[3], Nc, fft_vars[4], fft_vars[5])
        debug_print('Stats matrix is shape %s' % str(stats[i].shape),
                    0, debug)
        if debug >= 3:
            fig, ax = plt.subplots()
            t = np.arange(len(stats[i]))
            ax.plot(t, stats[i], color='k')
            ax.axis('tight')
            ax.set_ylim([0, 1])
            ax.plot([min(t), max(t)], [threshold, threshold], color='r',
                    lw=1, label='Threshold')
            ax.legend()
            plt.title('%s' % str(stream[0][i].stats.station))
            plt.show()
    trig_int_samples = detector.sampling_rate * trig_int
    debug_print('Finding peaks', 0, debug)
    peaks = []
    for i in range(len(stream[0])):
        peaks.append(findpeaks.find_peaks2_short(
            arr=stats[i], thresh=threshold, trig_int=trig_int_samples,
            debug=debug))
    if not detector.multiplex:
        # Conduct network coincidence triggering
        peaks = findpeaks.coin_trig(
            peaks=peaks, samp_rate=detector.sampling_rate, moveout=moveout,
            min_trig=min_trig, stachans=stachans, trig_int=trig_int)
    else:
        peaks = peaks[0]
    if len(peaks) > 0:
        for peak in peaks:
            detecttime = st[0].stats.starttime + (
                peak[1] / detector.sampling_rate)
            rid = ResourceIdentifier(
                id=detector.name + '_' + str(detecttime), prefix='smi:local')
            ev = Event(resource_id=rid)
            cr_i = CreationInfo(author='EQcorrscan',
                                creation_time=UTCDateTime())
            ev.creation_info = cr_i
            # All detection info in Comments for lack of a better idea
            thresh_str = 'threshold=' + str(threshold)
            ccc_str = 'detect_val=' + str(peak[0])
            used_chans = 'channels used: ' + ' '.join(
                [str(pair) for pair in detector.stachans])
            ev.comments.append(Comment(text=thresh_str))
            ev.comments.append(Comment(text=ccc_str))
            ev.comments.append(Comment(text=used_chans))
            for stachan in detector.stachans:
                tr = st.select(station=stachan[0], channel=stachan[1])
                if tr:
                    net_code = tr[0].stats.network
                else:
                    net_code = ''
                pick_tm = detecttime
                wv_id = WaveformStreamID(
                    network_code=net_code, station_code=stachan[0],
                    channel_code=stachan[1])
                ev.picks.append(Pick(time=pick_tm, waveform_id=wv_id))
            detections.append(Detection(
                template_name=detector.name, detect_time=detecttime,
                no_chans=len(detector.stachans), detect_val=peak[0],
                threshold=threshold, typeofdet='subspace',
                threshold_type='abs', threshold_input=threshold,
                chans=detector.stachans, event=ev))
    outtoc = time.perf_counter()
    print('Detection took %s seconds' % str(outtoc - outtic))
    if extract_detections:
        detection_streams = extract_from_stream(st, detections)
        return detections, detection_streams
    return detections
|
def newest_packages(pypi_server="https://pypi.python.org/pypi?%3Aaction=packages_rss"):
    """Constructs a request to the PyPI server and returns a list of
    :class:`yarg.parse.Package`.

    :param pypi_server: (option) URL to the PyPI server.

    >>> import yarg
    >>> yarg.newest_packages()
    [<Package yarg>, <Package gray>, <Package ragy>]
    """
    packages = []
    # Each RSS item exposes its fields positionally: 0=title, 1=link,
    # 3=description, 4=date.
    for item in _get(pypi_server):
        packages.append(Package({
            'name': item[0].text.split()[0],
            'url': item[1].text,
            'description': item[3].text,
            'date': item[4].text,
        }))
    return packages
|
def dispatch(self, request, *args, **kwargs):
    '''Handle the session data passed by the prior view.'''
    # Session payload written by the previous step of the sign-up flow.
    lessonSession = request.session.get(PRIVATELESSON_VALIDATION_STR, {})
    try:
        self.lesson = PrivateLessonEvent.objects.get(id=lessonSession.get('lesson'))
    except (ValueError, ObjectDoesNotExist):
        # Missing or malformed lesson id: send the user back to booking.
        messages.error(request, _('Invalid lesson identifier passed to sign-up form.'))
        return HttpResponseRedirect(reverse('bookPrivateLesson'))
    # The session entry is only valid for a limited time window.
    expiry = parse_datetime(lessonSession.get('expiry', ''),)
    if not expiry or expiry < timezone.now():
        messages.info(request, _('Your registration session has expired. Please try again.'))
        return HttpResponseRedirect(reverse('bookPrivateLesson'))
    self.payAtDoor = lessonSession.get('payAtDoor', False)
    return super(PrivateLessonStudentInfoView, self).dispatch(request, *args, **kwargs)
|
from typing import List
def max_jumps(nums: List[int], target: int) -> int:
    """Calculate the maximum number of jumps to reach the end of the array,
    obeying the condition that the absolute difference between the starting
    and ending element of the jump should not exceed the target.

    Args:
        nums (List[int]): A list of integers.
        target (int): Maximum allowed absolute difference between the
            starting and ending element of a jump.

    Returns:
        int: The maximum number of jumps to reach the last index, or -1 if
        the last index is unreachable. An empty or single-element input
        returns 0 (already at the end).

    Example:
        >>> max_jumps([1, 3, 6, 4, 1, 2], 2)
        3
        Explanation: jump 0 -> 1 -> 3 -> 5.
    """
    n = len(nums)
    if n <= 1:
        # Nothing to jump over (matches the original's dp=[0] seed).
        return 0
    # dp[i] = max jumps to reach index i, or -1 if index i is unreachable.
    # Pre-sized up front instead of appending -1 inside the work loop.
    dp = [-1] * n
    dp[0] = 0
    for i in range(1, n):
        for j in range(i):
            if dp[j] == -1:
                # Index j itself is unreachable; it cannot extend a path.
                continue
            if abs(nums[i] - nums[j]) <= target and dp[j] + 1 > dp[i]:
                dp[i] = dp[j] + 1
    return dp[-1]
|
def write(self, outputfile='out.pdb', appended=False):
    """Save the second PDB file aligned to the first.

    If appended is True, both structures are saved to ``outputfile`` as
    different chains (A for the first, B for the transformed second),
    with atoms renumbered sequentially.

    :param outputfile: path of the PDB file to write
    :param appended: write both structures as different chains
    """
    matrix = self.get_matrix(**self.get_current_values())
    with open(outputfile, 'w') as out:
        atomid = 1
        if appended:
            # First structure: copy ATOM records, renumber, force chain A.
            with open(self.pdb1) as src:
                for line in src:
                    if not line.startswith('ATOM') or (line[21] != self.chain_1 and line[21] != ' '):
                        continue
                    out.write(line[:7])
                    out.write('{: >4}'.format(atomid))
                    atomid += 1
                    out.write(line[11:21])
                    out.write('A')
                    out.write(line[22:])
            # Second structure: transform coordinates, force chain B.
            with open(self.pdb2) as src:
                for line in src:
                    if not line.startswith('ATOM') or (line[21] != self.chain_2 and line[21] != ' '):
                        continue
                    # BUG FIX: PDB coordinate columns are 31-38, 39-46 and
                    # 47-54 (1-based), i.e. [30:38]/[38:46]/[46:54]. The
                    # original sliced [32:38]/[39:46]/[48:54], truncating
                    # digits — inconsistent with the [22:30]+24-char+[54:]
                    # layout written below (the noted FIXME).
                    x = float(line[30:38])
                    y = float(line[38:46])
                    z = float(line[46:54])
                    vec = np.array([x, y, z, 1])
                    x, y, z, _ = matrix.dot(vec)
                    out.write(line[:7])
                    out.write('{: >4}'.format(atomid))
                    atomid += 1
                    out.write(line[11:21])
                    out.write('B')
                    out.write(line[22:30])
                    out.write('{:>8.3f}{:>8.3f}{:>8.3f}'.format(x, y, z))
                    out.write(line[54:])
|
def load_raw_data(assets, data_query_cutoff_times, expr, odo_kwargs, checkpoints=None):
    """Given an expression representing data to load, perform normalization
    and forward-filling and return the data, materialized. Only accepts
    data with a `sid` field.

    Parameters
    ----------
    assets : pd.int64index
        the assets to load data for.
    data_query_cutoff_times : pd.DatetimeIndex
        The datetime when data should no longer be considered available
        for a session.
    expr : expr
        the expression representing the data to load.
    odo_kwargs : dict
        extra keyword arguments to pass to odo when executing the
        expression.
    checkpoints : expr, optional
        the expression representing the checkpointed data for `expr`.

    Returns
    -------
    raw : pd.dataframe
        The result of computing expr and materializing the result as a
        dataframe.
    """
    # Query over the full span covered by the cutoff times.
    lower_dt = data_query_cutoff_times[0]
    upper_dt = data_query_cutoff_times[-1]
    raw = ffill_query_in_range(
        expr,
        lower_dt,
        upper_dt,
        checkpoints=checkpoints,
        odo_kwargs=odo_kwargs,
    )
    # Drop rows whose sid is not among the requested assets.
    sids = raw[SID_FIELD_NAME]
    raw.drop(sids[~sids.isin(assets)].index, inplace=True)
    return raw
|
def subseq(cls, fasta, start=None, stop=None, strand=None):
    """Take Bio.SeqRecord and slice "start:stop" from it, does proper
    index and error handling.

    :param fasta: record with ``seq`` and ``id`` attributes
    :param start: 1-based start position (defaults to the beginning)
    :param stop: 1-based inclusive stop position (defaults to the end)
    :param strand: reverse-complement when -1 / '-1' / '-'
    :return: the sliced (and possibly reverse-complemented) sequence
    """
    # Convert the 1-based start to a 0-based index; clamp out-of-range
    # values while logging the problem.
    begin = 0 if start is None else start - 1
    end = len(fasta) if stop is None else stop
    if begin < 0:
        msg = "start ({0}) must > 0 of `{1}`. Reset to 1".format(begin + 1, fasta.id)
        logging.error(msg)
        begin = 0
    if end > len(fasta):
        msg = "stop ({0}) must be <= length of `{1}` ({2}). Reset to {2}.".format(end, fasta.id, len(fasta))
        logging.error(msg)
        end = len(fasta)
    seq = fasta.seq[begin:end]
    if strand in (-1, '-1', '-'):
        seq = seq.reverse_complement()
    return seq
|
def _format_obj(self, item=None):
    """Determines the type of the object and maps it to the correct
    formatter.

    :param item: object of any type to format
    :return: the output of the matching type-specific formatter method
    """
    # Order here matters, odd behavior with tuples
    if item is None:
        # None is rendered by the number formatter.
        return getattr(self, 'number')(item)
    elif isinstance(item, self.str_):  #: String
        return item + " "
    elif isinstance(item, bytes):  #: Bytes
        return getattr(self, 'bytes')(item)
    elif isinstance(item, self.numeric_):  #: Float, int, etc.
        return getattr(self, 'number')(item)
    elif isinstance(item, self.dict_):  #: Dict
        return getattr(self, 'dict')(item)
    elif isinstance(item, self.list_):  #: List
        return getattr(self, 'list')(item)
    elif isinstance(item, tuple):  #: Tuple
        return getattr(self, 'tuple')(item)
    elif isinstance(item, types.GeneratorType):  #: Generator
        return getattr(self, 'generator')(item)
    elif isinstance(item, self.set_):  #: Set
        return getattr(self, 'set')(item)
    elif isinstance(item, deque):  #: Deque
        return getattr(self, 'deque')(item)
    elif isinstance(item, Sequence):  #: Sequence
        # Checked last among containers: most concrete types above are
        # also Sequences.
        return getattr(self, 'sequence')(item)
    #: Any other object
    return getattr(self, 'object')(item)
|
def ADD(self, params):
    """ADD [Rx,] Ry, [Rz, PC]
    ADD [Rx,] [SP, PC], #imm10_4
    ADD [SP,] SP, #imm9_4

    Add Ry and Rz and store the result in Rx.
    Rx, Ry, and Rz can be any register.
    If Rx is omitted, then it is assumed to be Ry.

    :param params: raw operand string to parse
    :return: a zero-argument closure that performs the addition when run
    :raises iarm.exceptions.RuleError: on operand-constraint violations
    """
    # This instruction allows for an optional destination register
    # If it is omitted, then it is assumed to be Rb
    # As defined in http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0662b/index.html
    # TODO can we have ADD SP, #imm9_4?
    try:
        # Try the three-operand form first.
        Rx, Ry, Rz = self.get_three_parameters(self.THREE_PARAMETER_COMMA_SEPARATED, params)
    except iarm.exceptions.ParsingError:
        # Two-operand form: the destination defaults to the first operand.
        Ry, Rz = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
        Rx = Ry
    if self.is_register(Rz):
        # ADD Rx, Ry, Rz
        self.check_arguments(any_registers=(Rx, Ry, Rz))
        if Rx != Ry:
            raise iarm.exceptions.RuleError("Second parameter {} does not equal first parameter {}".format(Ry, Rx))

        def ADD_func():
            self.register[Rx] = self.register[Ry] + self.register[Rz]
    else:
        # Immediate forms; the allowed immediate range depends on the
        # destination register.
        if Rx == 'SP':
            # ADD SP, SP, #imm9_4
            self.check_arguments(imm9_4=(Rz,))
            if Rx != Ry:
                raise iarm.exceptions.RuleError("Second parameter {} is not SP".format(Ry))
        else:
            # ADD Rx, [SP, PC], #imm10_4
            self.check_arguments(any_registers=(Rx,), imm10_4=(Rz,))
            if Ry not in ('SP', 'PC'):
                raise iarm.exceptions.RuleError("Second parameter {} is not SP or PC".format(Ry))

        def ADD_func():
            # Rz[1:] strips the leading '#' from the immediate literal.
            self.register[Rx] = self.register[Ry] + self.convert_to_integer(Rz[1:])
    # Execution is deferred: the closure is run by the simulator later.
    return ADD_func
|
def forward(self,  # pylint: disable=arguments-differ
            inputs: torch.Tensor, mask: torch.LongTensor) -> torch.Tensor:
    """Run the stacked LSTM over a batch of padded sequences.

    Parameters
    ----------
    inputs : ``torch.Tensor``, required.
        A Tensor of shape ``(batch_size, sequence_length, hidden_size)``.
    mask : ``torch.LongTensor``, required.
        A binary mask of shape ``(batch_size, sequence_length)`` representing the
        non-padded elements in each sequence in the batch.

    Returns
    -------
    A ``torch.Tensor`` of shape (num_layers, batch_size, sequence_length, hidden_size),
    where the num_layers dimension represents the LSTM output from that layer.
    """
    batch_size, total_sequence_length = mask.size()
    # sort_and_run_forward sorts sequences by length, runs the LSTM, and
    # returns the indices needed to undo the sort.
    stacked_sequence_output, final_states, restoration_indices = self.sort_and_run_forward(self._lstm_forward, inputs, mask)
    num_layers, num_valid, returned_timesteps, encoder_dim = stacked_sequence_output.size()
    # Add back invalid rows which were removed in the call to sort_and_run_forward.
    if num_valid < batch_size:
        zeros = stacked_sequence_output.new_zeros(num_layers, batch_size - num_valid, returned_timesteps, encoder_dim)
        stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 1)
        # The states also need to have invalid rows added back.
        new_states = []
        for state in final_states:
            state_dim = state.size(-1)
            zeros = state.new_zeros(num_layers, batch_size - num_valid, state_dim)
            new_states.append(torch.cat([state, zeros], 1))
        final_states = new_states
    # It's possible to need to pass sequences which are padded to longer than the
    # max length of the sequence to a Seq2StackEncoder. However, packing and unpacking
    # the sequences mean that the returned tensor won't include these dimensions, because
    # the RNN did not need to process them. We add them back on in the form of zeros here.
    sequence_length_difference = total_sequence_length - returned_timesteps
    if sequence_length_difference > 0:
        zeros = stacked_sequence_output.new_zeros(num_layers, batch_size, sequence_length_difference, stacked_sequence_output[0].size(-1))
        stacked_sequence_output = torch.cat([stacked_sequence_output, zeros], 2)
    self._update_states(final_states, restoration_indices)
    # Restore the original indices and return the sequence.
    # Has shape (num_layers, batch_size, sequence_length, hidden_size)
    return stacked_sequence_output.index_select(1, restoration_indices)
|
def normalize_getitem_args(args):
    '''Turns the arguments to __getitem__ magic methods into a uniform
    list of tuples and strings'''
    # A single (non-tuple) subscript arrives bare; wrap it for uniformity.
    if not isinstance(args, tuple):
        args = (args,)
    normalized = []
    for arg in args:
        if isinstance(arg, slice):
            # Slices become (start, stop) pairs; the step is ignored.
            normalized.append((arg.start, arg.stop))
        elif isinstance(arg, six.string_types + (int,)):
            normalized.append(arg)
        else:
            raise TypeError('Brackets cannot contain objects of type {0.__name__}'.format(type(arg)))
    return normalized
|
def analysis(self):
    """Get ANALYSIS segment of the FCS file."""
    # Lazily parse the segment on first access and cache it.
    if self._analysis is not None:
        return self._analysis
    with open(self.path, 'rb') as handle:
        self.read_analysis(handle)
    return self._analysis
|
def from_dict(cls, d):
    """Decode a dictionary, as from :meth:`to_dict`, into a Dmrs object.

    :param d: dictionary with optional 'nodes', 'links', 'lnk', 'surface'
        and 'identifier' keys
    :return: a new instance of ``cls`` built from the decoded parts
    """
    def _node(obj):
        # Rebuild a Node from its serialized form; missing keys become None.
        return Node(obj.get('nodeid'), Pred.surface_or_abstract(obj.get('predicate')), sortinfo=obj.get('sortinfo'), lnk=_lnk(obj.get('lnk')), surface=obj.get('surface'), base=obj.get('base'), carg=obj.get('carg'))
    def _link(obj):
        # Links are stored with 'from'/'to' endpoint ids.
        return Link(obj.get('from'), obj.get('to'), obj.get('rargname'), obj.get('post'))
    def _lnk(o):
        # 'lnk' serializes a character span as {'from': ..., 'to': ...}.
        return None if o is None else Lnk.charspan(o['from'], o['to'])
    return cls(nodes=[_node(n) for n in d.get('nodes', [])], links=[_link(l) for l in d.get('links', [])], lnk=_lnk(d.get('lnk')), surface=d.get('surface'), identifier=d.get('identifier'))
|
def update_resource(self, resource):
    '''Perform an atomic update for an existing resource'''
    # Target the resource by its position in the embedded list.
    position = self.resources.index(resource)
    self.update(**{'resources__{index}'.format(index=position): resource})
    # Refresh the document and notify listeners of the save.
    self.reload()
    post_save.send(self.__class__, document=self)
|
def find_next(lines, find_str, start_index):
    """Find the next instance of find_str from lines starting from start_index.

    :param lines: sequence of text lines to scan
    :param find_str: string (normal search) or ``Invert`` instance
        (success means the pattern was *not* found)
    :param start_index: index to start scanning from
    :return: tuple ``(matched, index, line)``; for a successful inverted
        search, ``(True, len(lines), None)``
    :raises TypeError: if find_str is neither a string nor an ``Invert``
    :raises LookupError: if a normal search finds no match
    """
    # Fix: the original tested ``isinstance(find_str, basestring)``, a
    # Python-2-only name; this file is Python 3 (f-strings, async def
    # elsewhere), so every call raised NameError.  ``str`` is the
    # equivalent Python 3 check.
    if isinstance(find_str, str):
        mode = 'normal'
        message = find_str
    elif isinstance(find_str, Invert):
        mode = 'invert'
        message = str(find_str)
    else:
        raise TypeError("Unsupported message type")
    for i in range(start_index, len(lines)):
        # Regex match first; fall back to a plain substring test for
        # patterns whose metacharacters prevent a regex hit.
        if re.search(message, lines[i]):
            return mode == 'normal', i, lines[i]
        elif message in lines[i]:
            return mode == 'normal', i, lines[i]
    if mode == 'invert':
        # Never finding an inverted pattern counts as success.
        return True, len(lines), None
    raise LookupError("Not found")
|
def _get_frame(self, key):
    """Creates a clone of the Layout with the nth-frame for each
    Element."""
    # current_key is still unset on the very first frame; nested plots may
    # then serve cached data.
    cached = self.current_key is None
    layout_frame = self.layout.clone(shared_data=False)
    if key == self.current_key and not self._force:
        # Same key and no forced refresh: reuse the last computed frame.
        return self.current_frame
    else:
        self.current_key = key
    # Map each dimension name to the corresponding value of the requested key.
    key_map = dict(zip([d.name for d in self.dimensions], key))
    for path, item in self.layout.items():
        frame = get_nested_plot_frame(item, key_map, cached)
        if frame is not None:
            layout_frame[path] = frame
    # Clear the force flag on this plot and all nested plots.
    traverse_setter(self, '_force', False)
    self.current_frame = layout_frame
    return layout_frame
|
async def _dump_tuple(self, writer, elem, elem_type, params=None):
    """Dumps tuple of elements to the writer.

    :param writer: destination writer object
    :param elem: tuple value to serialize
    :param elem_type: schema type providing the tuple's field specs
    :param params: optional; ``params[0]`` overrides the field specs, the
        remainder is forwarded to nested field serialization
    :return:
    :raises ValueError: if the tuple length does not match the schema
    """
    if len(elem) != len(elem_type.f_specs()):
        raise ValueError("Fixed size tuple has not defined size: %s" % len(elem_type.f_specs()))
    # Length prefix first, then each field in declaration order.
    await dump_uvarint(writer, len(elem))
    elem_fields = params[0] if params else None
    if elem_fields is None:
        elem_fields = elem_type.f_specs()
    # NOTE(review): the loop variable deliberately shadows ``elem``; the
    # original length was already captured above.
    for idx, elem in enumerate(elem):
        try:
            # Track the index so serialization errors can report their path.
            self.tracker.push_index(idx)
            await self.dump_field(writer, elem, elem_fields[idx], params[1:] if params else None)
            self.tracker.pop()
        except Exception as e:
            raise helpers.ArchiveException(e, tracker=self.tracker) from e
|
def check_window(self, name="default", level=0, baseline=False):
    """***  Automated Visual Testing with SeleniumBase  ***

    The first time a test calls self.check_window() for a unique "name"
    parameter, a visual baseline is created: a folder holding the page
    URL, a window screenshot, and three files of extracted page data:
        tags_level_1.txt  ->  HTML tags from the window
        tags_level_2.txt  ->  HTML tags + attribute names
        tags_level_3.txt  ->  HTML tags + attribute names/values
    Baseline folders are named from the test id and the "name" parameter,
    so one test can keep multiple baselines.

    On later runs the current window is compared to the baseline:
        level=0 -> DRY RUN: print differences but never fail
        level=1 -> compare HTML tags
        level=2 -> level 1, plus tag attribute names
        level=3 -> levels 1-2, plus tag attribute values
    Level-3 is the strictest; level-1 the least strict.  A comparison is
    preceded by a "Page Domain Mismatch Failure" check against the
    baseline's domain.  Running with --visual_baseline on the command
    line (or passing baseline=True on the first call in a test) rebuilds
    the baseline instead of comparing, so the test cannot fail.

    Not recommended for pages whose dynamic content changes the layout;
    use regular functional testing for those.

    Example usage:
        self.check_window(name="testing", level=0)
        self.check_window(name="wikipedia_page", level=3)
    """
    # Accept the level as a string ("0".."3") or an int; reject the rest.
    if level == "0":
        level = 0
    if level == "1":
        level = 1
    if level == "2":
        level = 2
    if level == "3":
        level = 3
    if level != 0 and level != 1 and level != 2 and level != 3:
        raise Exception('Parameter "level" must be set to 0, 1, 2, or 3!')
    # Build a unique test id from the module's last component + method name.
    module = self.__class__.__module__
    if '.' in module and len(module.split('.')[-1]) > 1:
        module = module.split('.')[-1]
    test_id = "%s.%s" % (module, self._testMethodName)
    if not name or len(name) < 1:
        name = "default"
    name = str(name)
    # Resolve the baseline folder and the five files stored inside it.
    visual_helper.visual_baseline_folder_setup()
    baseline_dir = constants.VisualBaseline.STORAGE_FOLDER
    visual_baseline_path = baseline_dir + "/" + test_id + "/" + name
    page_url_file = visual_baseline_path + "/page_url.txt"
    screenshot_file = visual_baseline_path + "/screenshot.png"
    level_1_file = visual_baseline_path + "/tags_level_1.txt"
    level_2_file = visual_baseline_path + "/tags_level_2.txt"
    level_3_file = visual_baseline_path + "/tags_level_3.txt"
    # (Re)create the baseline if requested, or if any baseline file is missing.
    set_baseline = False
    if baseline or self.visual_baseline:
        set_baseline = True
    if not os.path.exists(visual_baseline_path):
        set_baseline = True
        try:
            os.makedirs(visual_baseline_path)
        except Exception:
            pass
            # Only reachable during multi-threaded test runs
    if not os.path.exists(page_url_file):
        set_baseline = True
    if not os.path.exists(screenshot_file):
        set_baseline = True
    if not os.path.exists(level_1_file):
        set_baseline = True
    if not os.path.exists(level_2_file):
        set_baseline = True
    if not os.path.exists(level_3_file):
        set_baseline = True
    # Extract the three levels of page structure from the current window.
    page_url = self.get_current_url()
    soup = self.get_beautiful_soup()
    html_tags = soup.body.find_all()
    level_1 = [[tag.name] for tag in html_tags]
    level_1 = json.loads(json.dumps(level_1))
    # Tuples become lists
    level_2 = [[tag.name, sorted(tag.attrs.keys())] for tag in html_tags]
    level_2 = json.loads(json.dumps(level_2))
    # Tuples become lists
    level_3 = [[tag.name, sorted(tag.attrs.items())] for tag in html_tags]
    level_3 = json.loads(json.dumps(level_3))
    # Tuples become lists
    if set_baseline:
        # Baseline mode: persist screenshot, URL and all three tag levels.
        self.save_screenshot("screenshot.png", visual_baseline_path)
        out_file = codecs.open(page_url_file, "w+")
        out_file.writelines(page_url)
        out_file.close()
        out_file = codecs.open(level_1_file, "w+")
        out_file.writelines(json.dumps(level_1))
        out_file.close()
        out_file = codecs.open(level_2_file, "w+")
        out_file.writelines(json.dumps(level_2))
        out_file.close()
        out_file = codecs.open(level_3_file, "w+")
        out_file.writelines(json.dumps(level_3))
        out_file.close()
    if not set_baseline:
        # Comparison mode: load the stored baseline data back.
        f = open(page_url_file, 'r')
        page_url_data = f.read().strip()
        f.close()
        f = open(level_1_file, 'r')
        level_1_data = json.loads(f.read())
        f.close()
        f = open(level_2_file, 'r')
        level_2_data = json.loads(f.read())
        f.close()
        f = open(level_3_file, 'r')
        level_3_data = json.loads(f.read())
        f.close()
        domain_fail = ("Page Domain Mismatch Failure: " "Current Page Domain doesn't match the Page Domain of the " "Baseline! Can't compare two completely different sites! " "Run with --visual_baseline to reset the baseline!")
        level_1_failure = ("\n\n*** Exception: <Level 1> Visual Diff Failure:\n" "* HTML tags don't match the baseline!")
        level_2_failure = ("\n\n*** Exception: <Level 2> Visual Diff Failure:\n" "* HTML tag attributes don't match the baseline!")
        level_3_failure = ("\n\n*** Exception: <Level 3> Visual Diff Failure:\n" "* HTML tag attribute values don't match the baseline!")
        page_domain = self.get_domain_url(page_url)
        page_data_domain = self.get_domain_url(page_url_data)
        # Limit diff size for the coarse checks, then allow full diffs.
        unittest.TestCase.maxDiff = 1000
        if level == 1 or level == 2 or level == 3:
            self.assert_equal(page_domain, page_data_domain, domain_fail)
            self.assert_equal(level_1, level_1_data, level_1_failure)
        unittest.TestCase.maxDiff = None
        if level == 2 or level == 3:
            self.assert_equal(level_2, level_2_data, level_2_failure)
        if level == 3:
            self.assert_equal(level_3, level_3_data, level_3_failure)
        if level == 0:
            # Dry run: perform every comparison but only print failures.
            try:
                unittest.TestCase.maxDiff = 1000
                self.assert_equal(page_domain, page_data_domain, domain_fail)
                self.assert_equal(level_1, level_1_data, level_1_failure)
                unittest.TestCase.maxDiff = None
                self.assert_equal(level_2, level_2_data, level_2_failure)
                self.assert_equal(level_3, level_3_data, level_3_failure)
            except Exception as e:
                print(e)
|
def nancorr(a, b, method='pearson', min_periods=None):
    """a, b: ndarrays"""
    if len(a) != len(b):
        raise AssertionError('Operands to nancorr must have same size')
    min_periods = 1 if min_periods is None else min_periods
    # Keep only the positions where both operands are non-missing.
    mask = notna(a) & notna(b)
    if not mask.all():
        a, b = a[mask], b[mask]
    if len(a) < min_periods:
        return np.nan
    corr = get_corr_func(method)
    return corr(a, b)
|
def walk(node):
    """Iterate over all nodes (breadth-first). This is useful if you only
    want to modify nodes in place and don't care about the context or the
    order the nodes are returned."""
    from collections import deque
    queue = deque((node,))
    while queue:
        current = queue.popleft()
        # Enqueue children before yielding, preserving breadth-first order.
        queue.extend(iter_child_nodes(current))
        yield current
|
def Unique(a, t):
    """Unique op."""
    _, first_idx, inverse = np.unique(a, return_index=True, return_inverse=True)
    # Unique values in first-appearance order, plus per-element original
    # indices cast to the requested dtype.
    ordered_values = np.copy(a)[np.sort(first_idx)]
    return ordered_values, first_idx[inverse].astype(dtype_map[t])
|
def colorlog(msg, color, bold=False, blink=False):
    """Colors messages on non-Windows systems supporting ANSI escape."""
    # ANSI Escape Codes
    RESET = '\x1b[0m'
    BLINK = '\x1b[5m'
    COLOR_CODES = {
        'pink': '\x1b[35m',
        'green': '\x1b[32m',
        'red': '\x1b[31m',
        'yellow': '\x1b[33m',
    }
    if platform.system() != 'Windows':
        # Blink is applied first, so the color code wraps the blinking text.
        if blink:
            msg = BLINK + msg + RESET
        if color in COLOR_CODES:
            msg = COLOR_CODES[color] + msg + RESET
    return msg
|
def check_isomorphism(s1: str, s2: str) -> bool:
    """A python function to assess if two given strings are isomorphic.
    Isomorphism exists if every character in string1 can be replaced to get string2.

    >>> check_isomorphism('paper', 'title')
    True
    >>> check_isomorphism('ab', 'ba')
    True
    >>> check_isomorphism('ab', 'aa')
    False
    """
    def index_groups(s):
        # Group the occurrence positions of each character, then sort the
        # groups so they can be compared independently of character identity.
        groups = {}
        for pos, ch in enumerate(s):
            groups.setdefault(ch, []).append(pos)
        return sorted(groups.values())

    return index_groups(s1) == index_groups(s2)
|
def read_response(self, delegate: httputil.HTTPMessageDelegate) -> Awaitable[bool]:
    """Read a single HTTP response.

    Typical client-mode usage is to write a request using `write_headers`,
    `write`, and `finish`, and then call ``read_response``.

    :arg delegate: a `.HTTPMessageDelegate`

    Returns a `.Future` that resolves to a bool after the full response has
    been read. The result is true if the stream is still open.
    """
    if self.params.decompress:
        # Wrap the delegate so gzipped bodies are transparently decompressed
        # before chunks reach the caller.
        delegate = _GzipMessageDelegate(delegate, self.params.chunk_size)
    return self._read_message(delegate)
|
def match(self, xn):
    """Processes a transaction against this rule.

    If all conditions are satisfied, the list of outcomes is returned.
    If any condition is unsatisfied, None is returned.
    """
    for condition in self.conditions:
        if not condition.match(xn):
            return None
    return self.outcomes
|
def vcf2pileup(vcf, sample):
    '''convert vcf record to pileup record.

    Returns a PileupIndel for indel records, a PileupSubstitution for SNVs,
    or None when the sampled genotype is missing (".").
    '''
    chromosome = vcf.contig
    pos = vcf.pos
    reference = vcf.ref
    # Allele 0 is the reference; ALT alleles follow in VCF order.
    allelles = [reference] + vcf.alt
    data = vcf[sample]
    # get genotype
    genotypes = data["GT"]
    if len(genotypes) > 1:
        raise ValueError("only single genotype per position, %s" % (str(vcf)))
    genotypes = genotypes[0]
    # not a variant
    if genotypes[0] == ".":
        return None
    # Translate allele indices (e.g. "0/1") into their allele strings,
    # skipping the "/" separators.
    genotypes = [allelles[int(x)] for x in genotypes if x != "/"]
    # snp_quality is "genotype quality"
    snp_quality = consensus_quality = data.get("GQ", [0])[0]
    mapping_quality = vcf.info.get("MQ", [0])[0]
    coverage = data.get("DP", 0)
    if len(reference) > 1 or max([len(x) for x in vcf.alt]) > 1:
        # indel
        genotype, offset = translateIndelGenotypeFromVCF(genotypes, reference)
        return PileupIndel(chromosome, pos + offset, "*", genotype, consensus_quality, snp_quality, mapping_quality, coverage, genotype, "<" * len(genotype), 0, 0, 0)
    else:
        genotype = encodeGenotype("".join(genotypes))
        # Per-read columns are not reconstructable from a VCF record.
        read_bases = ""
        base_qualities = ""
        return PileupSubstitution(chromosome, pos, reference, genotype, consensus_quality, snp_quality, mapping_quality, coverage, read_bases, base_qualities)
|
def get(self, idx, default=None):
    """Return the first placeholder shape with matching *idx* value, or
    *default* if not found."""
    matches = (ph for ph in self if ph.element.ph_idx == idx)
    return next(matches, default)
|
def updateLodState(self, verbose=None):
    """Switch between full graphics details <---> fast rendering mode.
    Returns a success message.

    :param verbose: print more
    :returns: 200: successful operation
    """
    return api(url=self.___url + 'ui/lod', method="PUT", verbose=verbose)
|
def job_details(job_id, connection=None):
    """Returns the job data with its scheduled timestamp.

    :param job_id: the ID of the job to retrieve.
    :param connection: optional redis connection; defaults to the module-level
        client ``r``.
    """
    conn = connection if connection is not None else r
    raw = conn.hgetall(job_key(job_id))
    details = {
        'id': job_id,
        'schedule_at': int(conn.zscore(REDIS_KEY, job_id)),
    }
    for field, value in raw.items():
        try:
            parsed = value.decode('utf-8')
        except UnicodeDecodeError:
            # Keep non-text payloads as raw bytes.
            parsed = value
        if parsed.isdigit():
            parsed = int(parsed)
        details[field.decode('utf-8')] = parsed
    return details
|
def centroid_distance(item_a, time_a, item_b, time_b, max_value):
    """Euclidean distance between the centroids of item_a and item_b.

    Args:
        item_a: STObject from the first set in ObjectMatcher
        time_a: Time integer being evaluated
        item_b: STObject from the second set in ObjectMatcher
        time_b: Time integer being evaluated
        max_value: Maximum distance value used as scaling value and upper constraint.

    Returns:
        Distance value between 0 and 1.
    """
    ax, ay = item_a.center_of_mass(time_a)
    bx, by = item_b.center_of_mass(time_b)
    # Clip the raw distance at max_value, then normalize into [0, 1].
    separation = np.sqrt((ax - bx) ** 2 + (ay - by) ** 2)
    return np.minimum(separation, max_value) / float(max_value)
|
def in_casapy(helper, vis=None):
    """This function is run inside the weirdo casapy IPython environment! A
    strange set of modules is available, and the
    `pwkit.environments.casa.scripting` system sets up a very particular
    environment to allow encapsulated scripting."""
    import numpy as np, sys
    from correct_ant_posns import correct_ant_posns
    info = correct_ant_posns(vis, False)
    # Expected shape is (status, antennas, parameters); status 0 with a
    # non-empty antenna list signals success.
    if len(info) != 3 or info[0] != 0 or not len(info[1]):
        helper.die('failed to fetch VLA antenna positions; got %r', info)
    antenna = info[1]
    parameter = info[2]
    # Persist both arrays back-to-back in a single .npy stream; the caller
    # reads them back in the same order.
    with open(helper.temppath('info.npy'), 'wb') as f:
        np.save(f, antenna)
        np.save(f, parameter)
|
def start(self, measurementId):
    """Posts to the target to tell it a named measurement is starting.

    :param measurementId: identifier of the measurement being started
    """
    # Remember the endpoint so later calls can reuse it.
    self.sendURL = '{}{}/{}'.format(self.rootURL, measurementId, self.deviceName)
    self.startResponseCode = self._doPut(self.sendURL)
|
def route_delete(name, route_table, resource_group, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Delete a route from a route table.

    :param name: The route to delete.

    :param route_table: The route table containing the route.

    :param resource_group: The resource group name assigned to the
        route table.

    Returns True on success, False otherwise.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.route_delete test-rt test-rt-table testgroup
    '''
    result = False
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        # delete() returns a poller; wait() blocks until Azure completes it.
        route = netconn.routes.delete(resource_group_name=resource_group, route_table_name=route_table, route_name=name)
        route.wait()
        result = True
    except CloudError as exc:
        # Log the Azure error and fall through to return False.
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
    return result
|
def go_to_parent_directory(self):
    """Go to parent directory"""
    parent = osp.abspath(osp.join(getcwd_or_home(), os.pardir))
    self.chdir(parent)
|
def escape_string(value):
    """Converts a string to its S-expression representation, adding quotes
    and escaping funny characters."""
    pieces = ['"']
    for ch in value:
        code = ord(ch)
        if ch in CHAR_TO_ESCAPE:
            # Named escape (e.g. \n) takes precedence over everything else.
            pieces.append(f'\\{CHAR_TO_ESCAPE[ch]}')
        elif ch.isprintable():
            pieces.append(ch)
        elif code < 0x100:
            pieces.append(f'\\x{code:02x}')
        elif code < 0x10000:
            pieces.append(f'\\u{code:04x}')
        else:
            pieces.append(f'\\U{code:06x}')
    pieces.append('"')
    return ''.join(pieces)
|
def p0f(pkt):
    """Passive OS fingerprinting: which OS emitted this TCP packet?
    p0f(packet) -> accuracy, [list of guesses]"""
    db, sig = packet2p0f(pkt)
    if db:
        pb = db.get_base()
    else:
        pb = []
    if not pb:
        warning("p0f base empty.")
        return []
    # s = len(pb[0][0])
    r = []
    # Best possible correlation score -- presumably one point per TCP option
    # in sig[4] plus five for the remaining signature fields (TODO confirm
    # against p0f_correl).  NOTE(review): shadows the builtin ``max``.
    max = len(sig[4].split(",")) + 5
    for b in pb:
        d = p0f_correl(sig, b)
        if d == max:
            # Guess tuple: (OS name, OS flavor, inferred initial-TTL delta).
            r.append((b[6], b[7], b[1] - pkt[IP].ttl))
    return r
|
def log(self, sequence, infoarray) -> None:
    """Pass the given |IoSequence| to a suitable instance of
    a |NetCDFVariableBase| subclass.

    When writing data, `infoarray` should be an |InfoArray|; when reading
    data, it is ignored -- simply pass |None|.

    The target variable class is selected as follows:

     * aggregated data (``infoarray.info['type'] != 'unmodified'``) is
       logged to a |NetCDFVariableAgg|, and the aggregation type is
       appended to the variable name (e.g. ``flux_nkor_mean``) to keep it
       distinguishable from the unmodified variable;
     * otherwise a |NetCDFVariableFlat| is used when this file was created
       with ``flatten=True``, and a |NetCDFVariableDeep| when not.

    Variables are created lazily, receive this file's ``isolate`` and
    ``timeaxis`` settings, and are cached in ``self.variables`` under their
    descriptive name, so repeated calls with the same name log into the
    same variable object.
    """
    # Data counts as aggregated unless it is explicitly marked 'unmodified'.
    aggregated = ((infoarray is not None) and (infoarray.info['type'] != 'unmodified'))
    descr = sequence.descr_sequence
    if aggregated:
        # e.g. 'flux_nkor' + 'mean' -> 'flux_nkor_mean'
        descr = '_'.join([descr, infoarray.info['type']])
    if descr in self.variables:
        var_ = self.variables[descr]
    else:
        # Lazily create and cache the appropriate variable wrapper.
        if aggregated:
            cls = NetCDFVariableAgg
        elif self._flatten:
            cls = NetCDFVariableFlat
        else:
            cls = NetCDFVariableDeep
        var_ = cls(name=descr, isolate=self._isolate, timeaxis=self._timeaxis)
        self.variables[descr] = var_
    var_.log(sequence, infoarray)
|
def updateRPYLocations(self):
    '''Update the locations of roll, pitch, yaw text.'''
    # All three labels share the same x position and padding unit.
    x = self.leftPos + (self.vertSize / 10.0)
    pad = self.vertSize / 10.0
    # Locations
    self.rollText.set_position((x, -0.97 + (2 * self.vertSize) - pad))
    self.pitchText.set_position((x, -0.97 + self.vertSize - (0.5 * pad)))
    self.yawText.set_position((x, -0.97))
    # Font Size
    for label in (self.rollText, self.pitchText, self.yawText):
        label.set_size(self.fontSize)
|
async def getUpdates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
    """See: https://core.telegram.org/bots/api#getupdates"""
    # ``locals()`` captures the named parameters as the request payload --
    # presumably _strip removes ``self`` and unset values (TODO confirm).
    # NOTE(review): defining any local variable above this line would leak
    # it into the payload.
    p = _strip(locals())
    return await self._api_request('getUpdates', _rectify(p))
|
def get_cert_file():
    """Get the certificates file for https.

    Returns the absolute path to the bundled ``conf/cacert.pem``, or None
    if the path cannot be resolved.
    """
    try:
        here = os.path.realpath(__file__)
        # Walk three levels up from this file, then into conf/.
        candidate = os.path.join(here, os.pardir, os.pardir, os.pardir, "conf", "cacert.pem")
        return os.path.abspath(candidate)
    except Exception:
        return None
|
def sum_mags(mags, weights=None):
    """Sum an array of magnitudes in flux space.

    Parameters:
        mags:    array of magnitudes
        weights: array of weights for each magnitude (i.e. from a pdf)

    Returns:
        sum_mag: the summed magnitude of all the stars
    """
    # Convert magnitudes to fluxes, sum (optionally weighted), convert back.
    flux = 10 ** (-np.asarray(mags) / 2.5)
    total = np.sum(flux) if weights is None else np.sum(weights * flux)
    return -2.5 * np.log10(total)
|
def add_time_dependent_effects(self, ts):
    """Given a timeseries, apply a model to it.

    Parameters
    ----------
    ts:
        Time series of i.i.d. observations as a Numpy array

    returns the time series with added time-dependent effects as a Numpy array.
    """
    # Destination vector, pre-sized to match the input; the JVM model
    # writes into it.
    destts = Vectors.dense([0] * len(ts))
    result = self._jmodel.addTimeDependentEffects(_py2java(self._ctx, Vectors.dense(ts)), _py2java(self._ctx, destts))
    # Convert the JVM vector back into a Python/Numpy array.
    return _java2py(self._ctx, result.toArray())
|
def _dmi_data(dmi_raw, clean, fields):
    '''Parse the raw DMIdecode output of a single handle
    into a nice dict.

    :param dmi_raw: iterable of raw dmidecode lines (tab-indented)
    :param clean: when truthy, drop the 'header_and_data' key and let
        _dmi_cast sanitize values
    :param fields: optional whitelist of key names to keep
    '''
    dmi_data = {}
    key = None
    # key_data holds [scalar value, list of sub-items] for the current key.
    key_data = [None, []]
    for line in dmi_raw:
        # One tab: a new "Key: value" entry; flush the previous key first.
        if re.match(r'\t[^\s]+', line):
            # Finish previous key
            if key is not None:
                # log.debug('Evaluating DMI key {0}: {1}'.format(key, key_data))
                value, vlist = key_data
                if vlist:
                    if value is not None:
                        # On the rare occasion
                        # (I counted 1 on all systems we have)
                        # that there's both a value <and> a list
                        # just insert the value on top of the list
                        vlist.insert(0, value)
                    dmi_data[key] = vlist
                elif value is not None:
                    dmi_data[key] = value
            # Family: Core i5
            # Keyboard Password Status: Not Implemented
            key, val = line.split(':', 1)
            key = key.strip().lower().replace(' ', '_')
            # Skip unwanted keys; key=None also makes following sub-items
            # be ignored until the next key line.
            if (clean and key == 'header_and_data') or (fields and key not in fields):
                key = None
                continue
            else:
                key_data = [_dmi_cast(key, val.strip(), clean), []]
        elif key is None:
            continue
        # Two tabs: a list item belonging to the current key.
        elif re.match(r'\t\t[^\s]+', line):
            # Installable Languages: 1
            #        en-US
            # Characteristics:
            #        PCI is supported
            #        PNP is supported
            val = _dmi_cast(key, line.strip(), clean)
            if val is not None:
                # log.debug('DMI key %s gained list item %s', key, val)
                key_data[1].append(val)
    return dmi_data
|
def web_hook_receiver(sender, **kwargs):
    """Generic receiver for the web hook firing piece.

    Looks up the deployment referenced by ``kwargs['deployment_id']`` and
    delivers its payload to every registered web hook. Does nothing when
    the deployment has no hooks.
    """
    deployment = Deployment.objects.get(pk=kwargs.get('deployment_id'))
    hooks = deployment.web_hooks
    if not hooks:
        return
    # The payload depends only on the deployment, so build it once instead
    # of regenerating the identical data for every registered hook.
    data = payload_generator(deployment)
    for hook in hooks:
        deliver_hook(deployment, hook.url, data)
|
def pre_check(self, data):
    """Count chars, words and sentences in the text.

    :param data: input text to analyze
    :return: tuple ``(words, sentences, chars, num_words)`` where *words*
        is the text split on non-letter runs, *sentences* is at least 1,
        *chars* counts only alphanumerics, and *num_words* counts
        whitespace runs
    """
    # Fix: patterns are now raw strings -- '\W' and '\s' in plain string
    # literals are invalid escape sequences (SyntaxWarning on modern
    # Python, and slated to become errors).
    sentences = len(re.findall(r'[\.!?]+\W+', data)) or 1
    chars = len(data) - len(re.findall(r'[^a-zA-Z0-9]', data))
    num_words = len(re.findall(r'\s+', data))
    data = re.split(r'[^a-zA-Z]+', data)
    return data, sentences, chars, num_words
|
def _bind ( self ) :
"""bind to the ldap with the technical account"""
|
ldap_client = self . _connect ( )
try :
ldap_client . simple_bind_s ( self . binddn , self . bindpassword )
except Exception as e :
ldap_client . unbind_s ( )
self . _exception_handler ( e )
return ldap_client
|
def print_file(self, f=sys.stdout, file_format="cif", tw=0):
    """Print :class:`~nmrstarlib.nmrstarlib.CIFFile` into a file or stdout.

    :param io.StringIO f: writable file-like stream.
    :param str file_format: Format to use: `cif` or `json`.
    :param int tw: Tab width.
    :return: None
    :rtype: :py:obj:`None`
    """
    if file_format == "cif":
        for key in self.keys():
            if key == u"data":
                # Data-block header line, e.g. "data_<name>".
                print(u"{}_{}".format(key, self[key]), file=f)
            elif key.startswith(u"comment"):
                print(u"{}".format(self[key].strip()), file=f)
            elif key.startswith(u"loop_"):
                print(u"{}loop_".format(tw * u" "), file=f)
                self.print_loop(key, f, file_format, tw)
            else:
                # handle the NMR-Star "multiline string"
                if self[key].endswith(u"\n"):
                    print(u"{}_{}".format(tw * u" ", key), file=f)
                    print(u";{};".format(self[key]), file=f)
                # need to escape value with quotes (i.e. u"'{}'".format()) if value consists of two or more words
                elif len(self[key].split()) > 1:
                    print(u"{}_{}\t {}".format(tw * u" ", key, u"'{}'".format(self[key])), file=f)
                else:
                    print(u"{}_{}\t {}".format(tw * u" ", key, self[key]), file=f)
    elif file_format == "json":
        print(self._to_json(), file=f)
|
def cache_get(key):
    """Wrapper for ``cache.get``. The expiry time for the cache entry
    is stored with the entry. If the expiry time has past, put the
    stale entry back into cache, and don't return it to trigger a
    fake cache miss."""
    packed = cache.get(_hashed_key(key))
    if packed is None:
        return None
    value, refresh_time, refreshed = packed
    is_stale = time() > refresh_time
    if is_stale and not refreshed:
        # Re-store the stale value flagged as refreshed, then report a miss
        # so exactly one caller recomputes the entry.
        cache_set(key, value, settings.CACHE_SET_DELAY_SECONDS, True)
        return None
    return value
|
def set_dependencies(ctx, archive_name, dependency=None):
    '''Set the dependencies of an archive.'''
    _generate_api(ctx)
    # Parse before fetching, so a malformed dependency spec fails early.
    parsed = _parse_dependencies(dependency)
    archive = ctx.obj.api.get_archive(archive_name)
    archive.set_dependencies(dependencies=parsed)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.