signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def backprop(self, input_data, targets, cache=None):
    """Backpropagate through the logistic layer.

    **Parameters:**

    input_data : ``GPUArray``
        Input data to compute activations for.

    targets : ``GPUArray``
        The target values of the units.

    cache : list of ``GPUArray``
        Cache obtained from forward pass. If the cache is
        provided, then the activations are not recalculated.

    **Returns:**

    gradients : tuple of ``GPUArray``
        Gradients with respect to the weights and biases in the
        form ``(df_weights, df_biases)``.

    df_input : ``GPUArray``
        Gradients with respect to the input.
    """
    # Reuse cached activations when provided; otherwise run a forward pass.
    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)

    if activations.shape != targets.shape:
        raise ValueError('Activations (shape = %s) and targets (shape = %s) are different sizes' % (activations.shape, targets.shape))

    # Output delta for cross-entropy + logistic output; NaNs (e.g. from
    # masked targets) are zeroed out in place.
    delta = substract_matrix(activations, targets)
    nan_to_zeros(delta, delta)

    # Gradient wrt weights
    df_W = linalg.dot(input_data, delta, transa='T')
    # Gradient wrt bias
    df_b = matrix_sum_out_axis(delta, 0)
    # Gradient wrt input
    df_input = linalg.dot(delta, self.W, transb='T')

    # Regularization penalties (added only when the weights are non-zero).
    if self.l1_penalty_weight:
        df_W += self.l1_penalty_weight * sign(self.W)
    if self.l2_penalty_weight:
        df_W += self.l2_penalty_weight * self.W

    return (df_W, df_b), df_input
def write(self) -> None:
    """Call method |NetCDFFile.write| of all handled |NetCDFFile| objects."""
    if not self.folders:
        return
    # All files share the same time reference taken from the init period.
    init = hydpy.pub.timegrids.init
    timeunits = init.firstdate.to_cfunits('hours')
    timepoints = init.to_timepoints('hours')
    for folder in self.folders.values():
        for file_ in folder.values():
            file_.write(timeunits, timepoints)
def unload_ipython_extension(ip):
    """Unload me as an IPython extension.

    Use: %unload_ext wurlitzer
    """
    # BUG FIX: the original called getattr(ip, 'kernel') without a default,
    # which raises AttributeError in a plain terminal IPython (no `kernel`
    # attribute) instead of silently doing nothing as intended.
    if not getattr(ip, 'kernel', None):
        return
    ip.events.unregister('pre_execute', sys_pipes_forever)
    ip.events.unregister('post_execute', stop_sys_pipes)
def audit_ghosts(self):
    """Compare the list of configured jobs with the jobs in the state."""
    print_header = True
    for app_name in self._get_ghosts():
        if print_header:
            # Emit the header once, only when at least one ghost exists.
            print_header = False
            print("Found the following in the state database but not "
                  "available as a configured job:")
        # BUG FIX: the original used a Python 2 print *statement* here
        # (`print "\t%s" % ...`), a SyntaxError under Python 3 where the
        # rest of this method already uses the print() function.
        print("\t%s" % (app_name,))
def remove_empty_dirs(path=None):
    """Recursively delete empty directories; return True if everything was deleted."""
    if not path:
        path = settings.MEDIA_ROOT
    if not os.path.isdir(path):
        # Files (and missing paths) are never removed and block removal of
        # their parent directory.
        return False
    children = [os.path.join(path, name) for name in os.listdir(path)]
    # Recurse first; this level may go only if the whole subtree is empty.
    if not all(list(map(remove_empty_dirs, children))):
        return False
    os.rmdir(path)
    return True
def _prepare_files(self, encoding):
    """Build a multipart/form-data request body for paramType=form with File.

    Returns a ``(content_type, body_bytes)`` tuple, where *content_type*
    carries the generated boundary.
    """
    content_type = 'multipart/form-data'
    if self.__op.consumes and content_type not in self.__op.consumes:
        raise errs.SchemaError('content type {0} does not present in {1}'.format(content_type, self.__op.consumes))

    boundary = uuid4().hex
    content_type += '; boundary={0}'
    content_type = content_type.format(boundary)

    # init stream; text parts go through an encoding writer, raw bytes are
    # written directly.
    body = io.BytesIO()
    writer = codecs.getwriter(encoding)

    def append(name, obj):
        # One file part: boundary line, headers, blank line, payload, CRLF.
        body.write(six.b('--{0}\r\n'.format(boundary)))
        # header
        writer(body).write('Content-Disposition: form-data; name="{0}"; filename="{1}"'.format(name, obj.filename))
        body.write(six.b('\r\n'))
        if 'Content-Type' in obj.header:
            writer(body).write('Content-Type: {0}'.format(obj.header['Content-Type']))
            body.write(six.b('\r\n'))
        if 'Content-Transfer-Encoding' in obj.header:
            writer(body).write('Content-Transfer-Encoding: {0}'.format(obj.header['Content-Transfer-Encoding']))
            body.write(six.b('\r\n'))
        body.write(six.b('\r\n'))
        # body: either stream the file from disk, or use the in-memory data.
        if not obj.data:
            with open(obj.filename, 'rb') as f:
                body.write(f.read())
        else:
            data = obj.data.read()
            if isinstance(data, six.text_type):
                writer(body).write(data)
            else:
                body.write(data)
        body.write(six.b('\r\n'))

    # Plain form fields first.
    for k, v in self.__p['formData']:
        body.write(six.b('--{0}\r\n'.format(boundary)))
        writer(body).write('Content-Disposition: form-data; name="{0}"'.format(k))
        body.write(six.b('\r\n'))
        body.write(six.b('\r\n'))
        writer(body).write(v)
        body.write(six.b('\r\n'))

    # File section: a value may be one file object or a list of them.
    for k, v in six.iteritems(self.__p['file']):
        if isinstance(v, list):
            for vv in v:
                append(k, vv)
        else:
            append(k, v)

    # final boundary
    body.write(six.b('--{0}--\r\n'.format(boundary)))
    return content_type, body.getvalue()
def tell(self, msg, action, learnas=''):
    """Tell the server what type of message this is and what to do with it.

    Covers setting or removing entries in the local or remote database
    (learning, reporting, forgetting, revoking).
    """
    action = _check_action(action)
    mode = learnas.upper()
    headers = {'Message-class': '', 'Set': 'local', }
    if action == 'learn':
        if mode == 'SPAM':
            headers['Message-class'] = 'spam'
        elif mode in ['HAM', 'NOTSPAM', 'NOT_SPAM']:
            headers['Message-class'] = 'ham'
        else:
            raise SpamCError('The learnas option is invalid')
    elif action == 'forget':
        # Forgetting drops the classification from the local store entirely.
        del headers['Message-class']
        del headers['Set']
        headers['Remove'] = 'local'
    elif action == 'report':
        headers['Message-class'] = 'spam'
        headers['Set'] = 'local, remote'
    elif action == 'revoke':
        headers['Message-class'] = 'ham'
        headers['Remove'] = 'remote'
    return self.perform('TELL', msg, headers)
def map_lazy(
    self,
    target: Callable,
    map_iter: Sequence[Any] = None,
    *,
    map_args: Sequence[Sequence[Any]] = None,
    args: Sequence = None,
    map_kwargs: Sequence[Mapping[str, Any]] = None,
    kwargs: Mapping = None,
    pass_state: bool = False,
    num_chunks: int = None,
) -> SequenceTaskResult:
    r"""Functional equivalent of the built-in ``map()``, executed in parallel.

    Splits the Sequences supplied in the ``map_*`` arguments into
    ``num_chunks`` roughly equal chunks, dispatches each chunk to a worker
    node, and returns a lazy :py:class:`SequenceTaskResult` that fetches and
    recombines the results in their original order.

    :param target:
        Callable invoked on each worker as
        ``target(map_iter[i], *map_args[i], *args, **map_kwargs[i], **kwargs)``.
    :param map_iter:
        Sequence whose elements are supplied as the *first* positional
        argument to ``target``.
    :param map_args:
        Sequence whose elements are supplied as positional arguments
        (``*args``) to ``target``.
    :param map_kwargs:
        Sequence whose elements are supplied as keyword arguments
        (``**kwargs``) to ``target``.
    :param args:
        Common positional arguments for ``target``, supplied after
        ``map_iter`` and ``map_args``. Empty tuple by default.
    :param kwargs:
        Common keyword arguments for ``target``. Empty dict by default.
    :param pass_state:
        Whether a :py:class:`State` object is provided as the first argument
        to ``target``. ``False`` by default (to mirror built-in ``map()``).
    :param num_chunks:
        Number of worker nodes to use. Defaults to
        ``multiprocessing.cpu_count()``.
    :return:
        A lazy :py:class:`SequenceTaskResult` iterable over the return
        values of ``target``.

    .. note::
        If ``len(map_iter) != len(map_args) != len(map_kwargs)``, the
        results are cut off at the shortest Sequence.
    """
    if num_chunks is None:
        num_chunks = multiprocessing.cpu_count()

    lengths = [len(i) for i in (map_iter, map_args, map_kwargs) if i is not None]
    assert (lengths), "At least one of `map_iter`, `map_args`, or `map_kwargs` must be provided as a non-empty Sequence."
    length = min(lengths)
    # BUG FIX: the check is strictly greater-than (equality is rejected too),
    # so the old message "cannot be less than" was misleading.
    assert (length > num_chunks), "`length`(%d) must be greater than `num_chunks`(%d)" % (length, num_chunks)

    # Ceiling division: any remainder grows the chunk by one.
    chunk_length, extra = divmod(length, num_chunks)
    if extra:
        chunk_length += 1

    task_id = util.generate_task_id((chunk_length, length, num_chunks))
    iter_chunks = util.make_chunks(map_iter, chunk_length, num_chunks)
    args_chunks = util.make_chunks(map_args, chunk_length, num_chunks)
    kwargs_chunks = util.make_chunks(map_kwargs, chunk_length, num_chunks)
    target_bytes = serializer.dumps_fn(target)

    # One message per chunk: chunk id, serialized target, serialized task.
    for index in range(num_chunks):
        params = (iter_chunks[index], args_chunks[index], args, kwargs_chunks[index], kwargs,)
        task = (params, pass_state, self.namespace)
        self._task_push.send_multipart([util.encode_chunk_id(task_id, index), target_bytes, serializer.dumps(task), ])

    return SequenceTaskResult(self.server_address, task_id)
def get_multiple_devices(self, rids):
    """Implements the 'Get Multiple Devices' API.

    Param rids: a python list object of device rids.
    http://docs.exosite.com/portals/#get-multiple-devices
    """
    headers = {'User-Agent': self.user_agent(), 'Content-Type': self.content_type()}
    headers.update(self.headers())
    # Serialize the rid list into the URL by stripping quotes and spaces
    # from its repr, e.g. ['a','b'] -> [a,b].
    url = self.portals_url() + '/users/_this/devices/' + str(rids).replace("'", "").replace(' ', '')
    r = requests.get(url, headers=headers, auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        # TODO: loop through all rids and fix 'meta' to be dict like
        # add_device and get_device do
        return r.json()
    print("get_multiple_devices: Something went wrong: <{0}>: {1}".format(r.status_code, r.reason))
    r.raise_for_status()
def prod(self):
    """Lazy product aggregation over this expression.

    Returns:
        LazyOpResult: a "*"-aggregation (identity 1) over ``self.expr``
        with the same weld type and dimension 0.
    """
    aggregated = grizzly_impl.aggr(self.expr, "*", 1, self.weld_type)
    return LazyOpResult(aggregated, self.weld_type, 0)
def url(self, var, default=NOTSET):
    """Fetch *var* from the environment and parse it as a URL.

    :rtype: urlparse.ParseResult
    """
    return self.get_value(var, cast=urlparse, default=default, parse_default=True)
def _require_min_api_version ( self , version ) :
"""Raise an exception if the version of the api is less than the given version .
@ param version : The minimum required version .""" | actual_version = self . _get_resource_root ( ) . version
version = max ( version , self . _api_version ( ) )
if actual_version < version :
raise Exception ( "API version %s is required but %s is in use." % ( version , actual_version ) ) |
def _validator ( key , val , env ) :
"""Validates the given value to be either ' 0 ' or ' 1 ' .
This is usable as ' validator ' for SCons ' Variables .""" | if not env [ key ] in ( True , False ) :
raise SCons . Errors . UserError ( 'Invalid value for boolean option %s: %s' % ( key , env [ key ] ) ) |
def check_convert_string(obj, name=None, no_leading_trailing_whitespace=True, no_whitespace=False, no_newline=True, whole_word=False, min_len=1, max_len=0):
    """Ensure *obj* can be interpreted as a unicode string, optionally with
    additional restrictions imposed.

    By default this means a non-zero length string which does not begin or
    end in whitespace. Returns the unicode string on success.
    """
    name = name or 'Argument'
    obj = ensure_unicode(obj, name=name)
    if no_whitespace:
        if _PATTERN_WHITESPACE.match(obj):
            raise ValueError('%s cannot contain whitespace' % name)
    elif no_leading_trailing_whitespace and _PATTERN_LEAD_TRAIL_WHITESPACE.match(obj):
        raise ValueError('%s contains leading/trailing whitespace' % name)
    # max_len == 0 disables the upper bound; min_len == 0 disables the lower.
    if (min_len and len(obj) < min_len) or (max_len and len(obj) > max_len):
        raise ValueError('%s too short/long (%d/%d)' % (name, min_len, max_len))
    if whole_word:
        if not _PATTERN_WORD.match(obj):
            raise ValueError('%s can only contain alphanumeric (unicode) characters, numbers and the underscore' % name)
        # whole words cannot contain newline so additional check not required
    elif no_newline and '\n' in obj:
        raise ValueError('%s cannot contain line breaks' % name)
    return obj
def plus(self, a):
    """Return a new Vector holding the component-wise sum of self and *a*."""
    return Vector(self.x + a.x, self.y + a.y, self.z + a.z)
def setup ( self , timezone = None ) : # pylint : disable = arguments - differ
"""Sets up the _ timezone attribute .
Args :
timezone : Timezone name ( optional )""" | self . _timezone = timezone
self . _output_path = tempfile . mkdtemp ( ) |
def db_wb010(self, value=None):
    """Corresponds to IDD Field `db_wb010`: mean coincident dry-bulb
    temperature to wet-bulb temperature corresponding to 1.0% annual
    cumulative frequency of occurrence.

    Args:
        value (float): value for IDD Field `db_wb010` (unit: C).
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `db_wb010`'.format(value))
    self._db_wb010 = value
def get(self, layer, where="1 = 1", fields=[], count_only=False, srid='4326'):
    """Gets a layer and returns it as honest to God GeoJSON.

    WHERE 1 = 1 causes us to get everything. We use OBJECTID in the WHERE
    clause to paginate, so don't use OBJECTID in your WHERE clause unless
    you're going to query under 1000 objects.
    """
    base_where = where
    # Grab all fields explicitly rather than "*" — the wildcard proved
    # buggy in KMZ mode.
    fields = fields or self.enumerate_layer_fields(layer)
    jsobj = self.get_json(layer, where, fields, count_only, srid)

    # Sometimes you just want to know how far there is to go.
    if count_only:
        return jsobj.get('count')

    # No geometry: assume a Table-type layer and dump plain (non-geo) json.
    if not jsobj.get('geometryType', None):
        return self.getTable(layer, where, fields, jsobj=jsobj)

    # The whole layer tends to share one geometry type, so the parsing
    # function only needs to be determined once.
    geom_parser = self._determine_geom_parser(jsobj.get('geometryType'))

    features = []
    # Always run once, then break as soon as exceededTransferLimit stops.
    while True:
        features += [self.esri_to_geojson(feat, geom_parser) for feat in jsobj.get('features')]
        if jsobj.get('exceededTransferLimit', False) == False:
            break
        # Hit the transfer limit: offset by the last OBJECTID returned and
        # keep moving along.
        where = "%s > %s" % (self.object_id_field, features[-1]['properties'].get(self.object_id_field))
        if base_where != "1 = 1":
            # If we have another WHERE filter we need to tack it back on.
            where += " AND %s" % base_where
        jsobj = self.get_json(layer, where, fields, count_only, srid)

    return {'type': "FeatureCollection", 'features': features}
def _connected ( self , link_uri ) :
"""This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded .""" | print ( 'Connected to %s' % link_uri )
self . _is_link_open = True
self . _connect_event . set ( ) |
def signals_blocker(instance, attribute, *args, **kwargs):
    r"""Block the given instance's signals while calling *attribute* with
    the given arguments, then unblock them.

    :param instance: Instance object.
    :type instance: QObject
    :param attribute: Attribute to call.
    :type attribute: QObject
    :param \*args: Arguments.
    :param \*\*kwargs: Keywords arguments.
    :return: Object.
    :rtype: object
    """
    value = None
    try:
        # Non-QObject instances (no blockSignals) simply skip the blocking.
        if hasattr(instance, "blockSignals"):
            instance.blockSignals(True)
        value = attribute(*args, **kwargs)
    finally:
        if hasattr(instance, "blockSignals"):
            instance.blockSignals(False)
    return value
def sqllab(self):
    """SQL Editor"""
    bootstrap = {
        'defaultDbId': config.get('SQLLAB_DEFAULT_DBID'),
        'common': self.common_bootsrap_payload(),
    }
    return self.render_template(
        'superset/basic.html',
        entry='sqllab',
        bootstrap_data=json.dumps(bootstrap, default=utils.json_iso_dttm_ser),
    )
def read_until_done(self, command, timeout=None):
    """Yield messages read until we receive a 'DONE' command.

    Read messages of the given command until a 'DONE' command arrives. If a
    command different than the requested one is received, an
    AdbProtocolError is raised.

    Args:
        command: The command to expect, like 'DENT' or 'DATA'.
        timeout: The timeouts.PolledTimeout to use for this operation.

    Yields:
        Messages read, of type self.RECV_MSG_TYPE, see read_message().

    Raises:
        AdbProtocolError: If an unexpected command is read.
        AdbRemoteError: If a 'FAIL' message is read.
    """
    message = self.read_message(timeout)
    while message.command != 'DONE':
        # Anything that is neither DONE nor the expected command raises
        # inside assert_command_is.
        message.assert_command_is(command)
        yield message
        message = self.read_message(timeout)
def convert_vec2_to_vec4(scale, data):
    """Transform a flat iterable of 2-D coords into scaled 4-D homogeneous coords.

    For each (x, y) pair in *data*, yields x*scale, y*scale, 0.0, 1.0.
    A trailing unpaired value is ignored.
    """
    it = iter(data)
    # BUG FIX: the original used `while True: yield next(it) ...` and relied
    # on StopIteration escaping the generator to terminate it. Under PEP 479
    # (Python 3.7+) that raises RuntimeError instead. zip(it, it) consumes
    # the iterator pairwise and terminates cleanly.
    for x, y in zip(it, it):
        yield x * scale
        yield y * scale
        yield 0.0
        yield 1.0
def parse_spss_datafile(path, **kwargs):
    """Parse an SPSS data file into a list of non-empty lines.

    Arguments:
        path {str} -- path to the data file.
        **kwargs {dict} -- optional settings; 'encoding' defaults to 'latin-1'.
    """
    with codecs.open(path, 'r', kwargs.get('encoding', 'latin-1')) as handle:
        # Records are CRLF-delimited regardless of platform.
        lines = handle.read().split('\r\n')
    return exclude_empty_values(lines)
def decorate_function(self, name, decorator):
    """Wrap the registered function *name* with *decorator* in place.

    :param str name: Name of the function.
    :param callable decorator: Decorator callback.
    """
    self.functions[name] = decorator(self.functions[name])
def _original_images(self, **kwargs):
    r"""A list of the original images.

    :returns: A list of the original images.
    :rtype: :class:`typing.Sequence`\ [:class:`Image`]
    """
    def matches(image):
        # Only originals that also satisfy every keyword filter.
        if not image.original:
            return False
        for attr_name, expected in kwargs.items():
            if getattr(image, attr_name) != expected:
                return False
        return True

    if Session.object_session(self.instance) is None:
        # Detached instance: collect from the stored images, then from any
        # pending additions tracked in the attribute's committed state, and
        # finally from new objects in our own session, in that order.
        images = []
        for image, store in self._stored_images:
            if matches(image):
                images.append(image)
        state = instance_state(self.instance)
        try:
            added = state.committed_state[self.attr.key].added_items
        except KeyError:
            pass
        else:
            for image in added:
                if matches(image):
                    images.append(image)
        if self.session:
            for image in self.session.new:
                if matches(image):
                    images.append(image)
    else:
        # Attached instance: let the database do the filtering.
        query = self.filter_by(original=True, **kwargs)
        images = query.all()
    return images
def set_tolerance_value(self, tolerance):
    """stub"""
    # Index matters upstream: there could be multiple response/tolerance
    # pairs.
    if not isinstance(tolerance, float):
        raise InvalidArgument('tolerance value must be a decimal')
    self.add_decimal_value(tolerance, 'tolerance')
def check_internet_on(secrets_file_path):
    """Return True once the internet is on and the USB (secrets file) is
    unplugged; otherwise keep waiting and re-checking."""
    while True:
        if internet_on() is True and not os.path.exists(secrets_file_path):
            break
        print("Turn on your internet and unplug your USB to continue...")
        # Poll every 10 seconds.
        time.sleep(10)
    return True
def _extract_parameters_from_properties(properties):
    """Split *properties* into plain properties and prefixed parameters.

    Keys starting with _PARAMETER_PREFIX are stripped of the prefix and
    returned separately, sorted by name.
    """
    new_properties = {}
    parameters = []
    for key, value in six.iteritems(properties):
        if key.startswith(_PARAMETER_PREFIX):
            parameters.append((key.replace(_PARAMETER_PREFIX, ""), value))
        else:
            new_properties[key] = value
    return new_properties, sorted(parameters)
def authenticate_ldap(self, username, password):
    """Get user by username and password and their permissions via LDAP.

    :param username: Username. String with a minimum 3 and maximum of 45 characters
    :param password: User password. String with a minimum 3 and maximum of 45 characters

    :return: Dictionary with the user's id, name, pwd, email, active flag
        and a permission list of
        ``{'<function>': {'write': <value>, 'read': <value>}}`` entries.

    :raise InvalidParameterError: The value of username or password is invalid.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    user_map = {'username': username, 'password': password}
    code, xml = self.submit({'user': user_map}, 'POST', 'authenticate/ldap/')
    return self.response(code, xml)
def create(cls, train_ds, valid_ds, test_ds=None, path: PathOrStr = '.', bs: int = 32,
           val_bs: int = None, pad_idx=1, pad_first=True, device: torch.device = None,
           no_check: bool = False, backwards: bool = False, **dl_kwargs) -> DataBunch:
    "Function that transform the `datasets` in a `DataBunch` for classification. Passes `**dl_kwargs` on to `DataLoader()`"
    datasets = cls._init_ds(train_ds, valid_ds, test_ds)
    val_bs = ifnone(val_bs, bs)
    collate_fn = partial(pad_collate, pad_idx=pad_idx, pad_first=pad_first, backwards=backwards)
    # Training set: sortish sampling by sequence length, dropping the last
    # incomplete batch.
    train_sampler = SortishSampler(datasets[0].x, key=lambda t: len(datasets[0][t][0].data), bs=bs)
    train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs)
    dataloaders = [train_dl]
    # Validation/test sets: deterministic descending sort by sample length.
    for ds in datasets[1:]:
        lengths = [len(t) for t in ds.x.items]
        sampler = SortSampler(ds.x, key=lengths.__getitem__)
        dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs))
    return cls(*dataloaders, path=path, device=device, collate_fn=collate_fn, no_check=no_check)
def _Triple(S):
    """Calculate the triple point pressure and temperature for seawater.

    Parameters
    ----------
    S : float
        Salinity, [kg/kg]

    Returns
    -------
    prop : dict
        Dictionary with the triple point properties:
            * Tt: Triple point temperature, [K]
            * Pt: Triple point pressure, [MPa]

    References
    ----------
    IAPWS, Advisory Note No. 5: Industrial Calculation of the Thermodynamic
    Properties of Seawater, http://www.iapws.org/relguide/Advise5.html, Eq 7
    """
    def residuals(parr):
        # Equilibrium conditions: the seawater chemical potential must match
        # both ice (first residual) and vapour (second residual).
        T, P = parr
        pw = _Region1(T, P)
        gw = pw["h"] - T * pw["s"]
        pv = _Region2(T, P)
        gv = pv["h"] - T * pv["s"]
        gih = _Ice(T, P)["g"]
        ps = SeaWater._saline(T, P, S)
        return (-ps["g"] + S * ps["gs"] - gw + gih,
                -ps["g"] + S * ps["gs"] - gw + gv)

    # Start the solver near pure water's triple point (273 K, 6e-4 MPa).
    Tt, Pt = fsolve(residuals, [273, 6e-4])
    return {"Tt": Tt, "Pt": Pt}
def get_secret(self, filename, secret, type_=None):
    """Check whether a secret is found in the collection.

    :type filename: str
    :param filename: the file to search in.
    :type secret: str
    :param secret: secret hash of secret to search for.
    :type type_: str
    :param type_: type of secret, if known.
    :rtype: PotentialSecret|None
    """
    if filename not in self.data:
        return None

    if type_:
        # Optimized lookup: knowing the type lets us build a probe object
        # with the right hash and look it up directly.
        tmp_secret = PotentialSecret(type_, filename, secret='will be overriden')
        tmp_secret.secret_hash = secret
        if tmp_secret in self.data[filename]:
            return self.data[filename][tmp_secret]
        return None

    # Without the type we have to scan the whole set for a matching hash.
    for candidate in self.data[filename]:
        if candidate.secret_hash == secret:
            return candidate
    return None
def get_copy_folder_location():
    """Try to locate the Copy folder.

    Returns:
        (str) Full path to the current Copy folder
    """
    copy_settings_path = 'Library/Application Support/Copy Agent/config.db'
    copy_home = None
    copy_settings = os.path.join(os.environ['HOME'], copy_settings_path)

    if os.path.isfile(copy_settings):
        database = sqlite3.connect(copy_settings)
        if database:
            cur = database.cursor()
            # The agent stores its sync-root path under the csmRootPath option.
            cur.execute("SELECT value "
                        "FROM config2 "
                        "WHERE option = 'csmRootPath';")
            row = cur.fetchone()
            copy_home = str(row[0])
            cur.close()

    if not copy_home:
        error("Unable to find your Copy install =(")
    return copy_home
def save(obj, filename, protocol=4):
    """Serialize an object to disk using the pickle protocol.

    Args:
        obj: The object to serialize.
        filename: Path to the output file.
        protocol: Version of the pickle protocol.
    """
    with open(filename, 'wb') as fh:
        pickle.dump(obj, fh, protocol=protocol)
def group(iterable, key):
    """groupby which sorts the input, discards the key and yields the output
    as a sequence of lists."""
    ordered = sorted(iterable, key=key)
    for _, members in groupby(ordered, key=key):
        yield list(members)
def write_ndjson(filename, data_iterable, append=False, **kwargs):
    """Generator that writes newline-delimited json to a file while yielding
    each item from *data_iterable* unchanged."""
    write_mode = "ab" if append else "wb"
    logger.info("writing to file {}".format(filename))
    with codecs.open(filename, write_mode, "utf-8") as outfile:
        for item in data_iterable:
            outfile.write(json.dumps(item) + "\n")
            # Pass the item through so the caller can keep processing it.
            yield item
def _onShortcutCutLine ( self ) :
"""Cut selected lines to the clipboard""" | lines = self . lines [ self . _selectedLinesSlice ( ) ]
self . _onShortcutCopyLine ( )
self . _onShortcutDeleteLine ( ) |
def time_partitioning(self):
    """google.cloud.bigquery.table.TimePartitioning: Specifies time-based
    partitioning for the destination table."""
    prop = self._get_sub_prop("timePartitioning")
    if prop is None:
        return None
    # Deserialize the raw API representation into the rich object.
    return TimePartitioning.from_api_repr(prop)
def snmp_server_user_auth_password(self, **kwargs):
    """Build the brocade-snmp user/auth-password config element and pass it
    to the callback. (Auto-generated code.)"""
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    user = ET.SubElement(snmp_server, "user")
    ET.SubElement(user, "username").text = kwargs.pop('username')
    ET.SubElement(user, "auth-password").text = kwargs.pop('auth_password')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def fix_tag(value):
    """Make a FIX tag value from string, bytes, or integer."""
    if sys.version_info[0] == 2:
        # Python 2: bytes() is str and coerces everything.
        return bytes(value)
    # Python 3: bytes pass through; text and everything else is
    # ASCII-encoded.
    if type(value) == bytes:
        return value
    if type(value) == str:
        return value.encode('ASCII')
    return str(value).encode('ASCII')
def facts():
    '''Displays the facts gathered during the connection.
    These facts are also stored in Salt grains.

    CLI Example:

    .. code-block:: bash

        salt 'device_name' junos.facts
    '''
    ret = {}
    try:
        ret['facts'] = __proxy__['junos.get_serialized_facts']()
        ret['out'] = True
    except Exception as exception:
        # Report the failure through the return payload instead of raising.
        ret['message'] = 'Could not display facts due to "{0}"'.format(exception)
        ret['out'] = False
    return ret
def get_numeric_score_increment_metadata(self):
    """Gets the metadata for the numeric score increment.

    return: (osid.Metadata) - metadata for the numeric score increment
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceForm.get_group_metadata_template
    metadata = dict(self._mdata['numeric_score_increment'])
    metadata.update({'existing_decimal_values': self._my_map['numericScoreIncrement']})
    return Metadata(**metadata)
def bump_version(ctx, new_version, version_part=None, dry_run=False):
    """Bump version (to prepare a new release)."""
    version_part = version_part or "minor"
    if dry_run:
        # Route commands through a wrapper that only prints them.
        ctx = DryRunContext(ctx)
    ctx.run("bumpversion --new-version={} {}".format(new_version, version_part))
def _edges_classify_intersection9():
    """The edges for the curved polygon intersection used below.

    Helper for :func:`classify_intersection9`.
    """
    # Each polygon: line, quadratic, quadratic, line (Fortran-ordered nodes).
    edges1 = (
        bezier.Curve.from_nodes(np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])),
    )
    edges2 = (
        bezier.Curve.from_nodes(np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])),
        bezier.Curve.from_nodes(np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])),
    )
    return edges1, edges2
def add(self, item_numid, collect_type, shared, session):
    '''taobao.favorite.add — add an entry to the favorites list.

    Performs the collect action given the target's numeric id, the collect
    type and the shared flag. (Original docstring was in Chinese.)
    '''
    request = TOPRequest('taobao.favorite.add')
    request['item_numid'] = item_numid
    request['collect_type'] = collect_type
    request['shared'] = shared
    self.create(self.execute(request, session))
    return self.result
def get_theme_style(theme):
    """Read theme style info and populate the matplotlib style mapping.

    Parses the theme's ``.less`` file to fill ``styleMap`` (dict mapping
    figure properties to theme-specific colors, used for ``mpl.rcParams``)
    and ``clist`` (list of hex codes passed to the color cycler).

    ::Arguments::
        theme (str): theme name; ``'default'`` returns the stock style.

    ::Returns::
        styleMap (dict): theme-specific colors for figure properties
        clist (list): colors replacing mpl's default color_cycle
    """
    styleMap, clist = get_default_jtstyle()
    if theme == 'default':
        return styleMap, clist

    syntaxVars = ['@yellow:', '@orange:', '@red:', '@magenta:',
                  '@violet:', '@blue:', '@cyan:', '@green:']

    def get_hex_code(line):
        # Extract the 7-character "#rrggbb" token just before the ';'.
        return line.split(':')[-1].split(';')[0][-7:]

    themeFile = os.path.join(styles_dir, theme + '.less')
    with open(themeFile) as f:
        for line in f:
            stripped = line.strip()
            for key in styleMap:
                if key in stripped:
                    styleMap[key] = get_hex_code(line)
            # Replace syntax variable names in place with their hex values.
            for position, var in enumerate(syntaxVars):
                if var in stripped:
                    syntaxVars[position] = get_hex_code(line)

    # De-duplicate hex colors before extending the cycler list.
    clist.extend(set(syntaxVars))
    clist = remove_non_colors(clist)
    return styleMap, clist
def is_same(self, DataStruct):
    """Return True when ``DataStruct`` matches this structure.

    Two data structures are considered the same when both their ``type``
    and their ``if_fq`` (price-adjustment flag) attributes are equal.
    """
    # Direct boolean expression instead of the if/else True/False anti-idiom.
    return self.type == DataStruct.type and self.if_fq == DataStruct.if_fq
def forwards(self, orm):
    """Copy QualifiedDublinCoreElement rows (and their history) from the
    ``xtf`` app's tables into the ``dublincore`` app's tables."""
    # First pass: copy the elements themselves, iterating in primary-key
    # order so rows are created in the same sequence as the originals.
    for qde_xtf in orm['xtf.QualifiedDublinCoreElement'].objects.all().order_by('id'):
        qde = orm.QualifiedDublinCoreElement()
        qde.content = qde_xtf.content
        qde.term = qde_xtf.term
        qde.qualifier = qde_xtf.qualifier
        # Re-fetch the ContentType through this migration's frozen orm so
        # the FK references the frozen model, not the live one.
        c = orm['contenttypes.ContentType'].objects.get(pk=qde_xtf.content_type.pk)
        qde.content_type = c
        qde.object_id = qde_xtf.object_id
        qde.save()
    # Second pass: copy the history rows, re-linking each to the element
    # copied above.
    for qdeh_xtf in orm['xtf.QualifiedDublinCoreElementHistory'].objects.all().order_by('id'):
        qdeh = orm.QualifiedDublinCoreElementHistory()
        qdeh.content = qdeh_xtf.content
        qdeh.term = qdeh_xtf.term
        qdeh.qualifier = qdeh_xtf.qualifier
        c = orm['contenttypes.ContentType'].objects.get(pk=qdeh_xtf.content_type.pk)
        qdeh.content_type = c
        qdeh.object_id = qdeh_xtf.object_id
        # NOTE(review): looking up the copied element by the *old* pk assumes
        # pks were preserved by the first pass (i.e. the target table was
        # empty and auto-increment matched) -- verify before running.
        qdeh.qdce = orm['dublincore.QualifiedDublinCoreElement'].objects.get(pk=qdeh_xtf.qdce.pk)
        qdeh.qdce_id_stored = qdeh_xtf.qdce_id_stored
        qdeh.save()
def native(self, value, context=None):
    """Convert a value from a foreign type (i.e. web-safe) to Python-native."""
    # Let the parent class perform its own normalization first.
    value = super().native(value, context)
    if value is None:
        return
    try:
        # ``ingress`` is the configured inbound transformation callable.
        return self.ingress(value)
    except Exception as e:
        # Broad catch is deliberate: any failure of the transform is
        # surfaced as a Concern instead of propagating.
        # NOTE(review): Concern appears to accept a format string plus
        # arguments -- confirm against its signature.
        raise Concern("Unable to transform incoming value: {0}", str(e))
def states_close(state0: State, state1: State, tolerance: float = TOLERANCE) -> bool:
    """Return True when the two states are almost identical.

    Closeness is measured with the Fubini-Study angle metric applied to
    the underlying state vectors.
    """
    vec0 = state0.vec
    vec1 = state1.vec
    return vectors_close(vec0, vec1, tolerance)
def send(self, topic, value=None, key=None, headers=None, partition=None, timestamp_ms=None):
    """Publish a message to a topic.

    Arguments:
        topic (str): topic where the message will be published
        value (optional): message value. Must be type bytes, or be
            serializable to bytes via configured value_serializer. If value
            is None, key is required and the message acts as a 'delete'.
            See kafka compaction documentation for more details:
            https://kafka.apache.org/documentation.html#compaction
            (compaction requires kafka >= 0.8.1)
        partition (int, optional): optionally specify a partition. If not
            set, the partition will be selected using the configured
            'partitioner'.
        key (optional): a key to associate with the message. Can be used to
            determine which partition to send the message to. If partition
            is None (and producer's partitioner config is left as default),
            then messages with the same key will be delivered to the same
            partition (but if key is None, partition is chosen randomly).
            Must be type bytes, or be serializable to bytes via configured
            key_serializer.
        headers (optional): a list of header key value pairs. List items
            are tuples of str key and bytes value.
        timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
            UTC) to use as the message timestamp. Defaults to current time.

    Returns:
        FutureRecordMetadata: resolves to RecordMetadata

    Raises:
        KafkaTimeoutError: if unable to fetch topic metadata, or unable
            to obtain memory buffer prior to configured max_block_ms
    """
    # Null payloads ('tombstones') only make sense for log compaction,
    # which requires broker >= 0.8.1.
    assert value is not None or self.config['api_version'] >= (0, 8, 1), (
        'Null messages require kafka >= 0.8.1')
    assert not (value is None and key is None), 'Need at least one: key or value'
    key_bytes = value_bytes = None
    try:
        # Block (up to max_block_ms) until metadata for the topic is known.
        self._wait_on_metadata(topic, self.config['max_block_ms'] / 1000.0)
        key_bytes = self._serialize(self.config['key_serializer'], topic, key)
        value_bytes = self._serialize(self.config['value_serializer'], topic, value)
        assert type(key_bytes) in (bytes, bytearray, memoryview, type(None))
        assert type(value_bytes) in (bytes, bytearray, memoryview, type(None))
        # The partitioner receives both the raw and the serialized key/value.
        partition = self._partition(topic, partition, key, value, key_bytes, value_bytes)
        if headers is None:
            headers = []
        assert type(headers) == list
        assert all(type(item) == tuple and len(item) == 2 and type(item[0]) == str and type(item[1]) == bytes for item in headers)
        message_size = self._estimate_size_in_bytes(key_bytes, value_bytes, headers)
        self._ensure_valid_record_size(message_size)
        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%r value=%r headers=%r) to %s", key, value, headers, tp)
        result = self._accumulator.append(tp, timestamp_ms, key_bytes, value_bytes, headers, self.config['max_block_ms'], estimated_size=message_size)
        future, batch_is_full, new_batch_created = result
        if batch_is_full or new_batch_created:
            # Wake the background sender thread so the full (or freshly
            # created) batch is transmitted promptly.
            log.debug("Waking up the sender since %s is either full or" " getting a new batch", tp)
            self._sender.wakeup()
        return future
    # handling exceptions and record the errors;
    # for API exceptions return them in the future,
    # for other exceptions raise directly
    except Errors.BrokerResponseError as e:
        log.debug("Exception occurred during message send: %s", e)
        # Resolve the returned future immediately with the failure; -1 is
        # used as the "unknown" size for missing key/value/header bytes.
        return FutureRecordMetadata(FutureProduceResult(TopicPartition(topic, partition)), -1, None, None, len(key_bytes) if key_bytes is not None else -1, len(value_bytes) if value_bytes is not None else -1, sum(len(h_key.encode("utf-8")) + len(h_value) for h_key, h_value in headers) if headers else -1, ).failure(e)
def traceroute(self, destination, source=c.TRACEROUTE_SOURCE, ttl=c.TRACEROUTE_TTL, timeout=c.TRACEROUTE_TIMEOUT, vrf=c.TRACEROUTE_VRF, ):
    """Executes traceroute on the device and returns a dictionary with the result.

    :param destination: Host or IP Address of the destination
    :param source (optional): Use a specific IP Address to execute the traceroute
    :param ttl (optional): Maximum number of hops
    :param timeout (optional): Number of seconds to wait for response
    :param vrf (optional): VRF in which to execute the traceroute

    Output dictionary has one of the following keys:

        * success
        * error

    In case of success, the keys of the dictionary represent the hop ID,
    while values are dictionaries containing the probes results:

        * rtt (float)
        * ip_address (str)
        * host_name (str)

    Example (structure reconstructed from the per-probe fields above)::

        {
            'success': {
                1: {
                    'probes': {
                        1: {
                            'rtt': 1.123,
                            'ip_address': u'206.223.116.21',
                            'host_name': u'eqixsj-google-gige.google.com'
                        },
                        2: {
                            'rtt': 1.9,
                            'ip_address': u'206.223.116.21',
                            'host_name': u'eqixsj-google-gige.google.com'
                        }
                    }
                },
                2: {
                    'probes': {
                        1: {
                            'rtt': 1.361,
                            'ip_address': u'8.8.8.8',
                            'host_name': u'google-public-dns-a.google.com'
                        }
                    }
                }
            }
        }

    OR

        {
            'error': 'unknown host 8.8.8.8.8'
        }

    This base implementation is abstract: concrete platform drivers must
    override it.
    """
    raise NotImplementedError
def do_get_next(endpoint, access_token):
    '''Do an HTTP GET request, follow the nextLink chain and return JSON.

    Args:
        endpoint (str): Azure Resource Manager management endpoint.
        access_token (str): A valid Azure authentication token.

    Returns:
        HTTP response JSON body: the raw response when it carries no
        'value' list, otherwise a dict whose 'value' key holds the
        concatenation of every page's 'value' entries.
    '''
    headers = {"Authorization": 'Bearer ' + access_token}
    headers['User-Agent'] = get_user_agent()
    value_list = []
    while True:
        get_return = requests.get(endpoint, headers=headers).json()
        # Responses without a 'value' list are not paged collections;
        # return them unchanged. (Was the non-idiomatic ``not 'value' in``.)
        if 'value' not in get_return:
            return get_return
        value_list += get_return['value']
        if 'nextLink' not in get_return:
            break
        # Follow the pagination chain.
        endpoint = get_return['nextLink']
    return {'value': value_list}
def required_types(self):
    """Set of names of types which the Command depends on.

    Collects the declared type of every parameter plus the Command's own
    type, dropping ``None`` (entries without a declared type).
    """
    # Set comprehension instead of set(generator) (lint C401); discard None
    # so untyped entries do not register as dependencies.
    required = {param.type for param in self.params}
    required.add(self.type)
    required.discard(None)
    return required
def _query_by_distro(self, table_name):
    """Query for download data broken down by OS distribution, for one day.

    :param table_name: table name to query against
    :type table_name: str
    :return: dict of download information by distro; keys are project name,
        values are a dict of distro names to dicts of distro version to
        download count.
    :rtype: dict
    """
    logger.info('Querying for downloads by distro in table %s', table_name)
    q = "SELECT file.project, details.distro.name, " "details.distro.version, COUNT(*) as dl_count " "%s " "%s " "GROUP BY file.project, details.distro.name, " "details.distro.version;" % (self._from_for_table(table_name), self._where_for_projects)
    res = self._run_query(q)
    result = self._dict_for_projects()
    # Aggregate each result row into the per-project nested dict:
    # result[project][distro_name][distro_version] -> count.
    for row in res:
        # pointer to the per-project result dict
        proj = result[row['file_project']]
        # NOTE(review): distro name/version can be None here. An earlier
        # comment claimed None is changed to 'unknown', but no such
        # conversion happens -- None is used as a dict key as-is. Confirm
        # whether that is intended.
        dname = row['details_distro_name']
        dver = row['details_distro_version']
        if dname not in proj:
            proj[dname] = {}
        if dver not in proj[dname]:
            proj[dname][dver] = 0
        proj[dname][dver] += int(row['dl_count'])
    return result
def parse_frame(self, buf: bytes) -> List[Tuple[bool, Optional[int], bytearray, Optional[bool]]]:
    """Parse WebSocket frames out of ``buf`` and return the completed ones.

    Any bytes left over from the previous call (``self._tail``) are
    prepended first; parser state is kept on ``self`` so a frame may span
    multiple calls. Returns a list of
    ``(fin, opcode, payload, compressed)`` tuples.
    """
    frames = []
    if self._tail:
        # Prepend bytes left unconsumed by the previous call.
        buf, self._tail = self._tail + buf, b''
    start_pos = 0
    buf_length = len(buf)
    while True:
        # read header (2 bytes: FIN/RSV/opcode, MASK/length)
        if self._state == WSParserState.READ_HEADER:
            if buf_length - start_pos >= 2:
                data = buf[start_pos:start_pos + 2]
                start_pos += 2
                first_byte, second_byte = data
                fin = (first_byte >> 7) & 1
                rsv1 = (first_byte >> 6) & 1
                rsv2 = (first_byte >> 5) & 1
                rsv3 = (first_byte >> 4) & 1
                opcode = first_byte & 0xf
                # frame-fin = %x0 ; more frames of this message follow
                #           / %x1 ; final frame of this message
                # frame-rsv1 = %x0 ;
                #     1 bit, MUST be 0 unless negotiated otherwise
                # frame-rsv2 = %x0 ;
                #     1 bit, MUST be 0 unless negotiated otherwise
                # frame-rsv3 = %x0 ;
                #     1 bit, MUST be 0 unless negotiated otherwise
                #
                # rsv1 is tolerated when compression was negotiated
                # (permessage-deflate sets it on the first fragment).
                if rsv2 or rsv3 or (rsv1 and not self._compress):
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received frame with non-zero reserved bits')
                # Control frames (opcode > 0x7) must not be fragmented.
                if opcode > 0x7 and fin == 0:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received fragmented control frame')
                has_mask = (second_byte >> 7) & 1
                length = second_byte & 0x7f
                # Control frames MUST have a payload
                # length of 125 bytes or less
                if opcode > 0x7 and length > 125:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Control frame payload cannot be '
                        'larger than 125 bytes')
                # Set compress status if last package is FIN
                # OR set compress status if this is first fragment
                # Raise error if not first fragment with rsv1 = 0x1
                if self._frame_fin or self._compressed is None:
                    self._compressed = True if rsv1 else False
                elif rsv1:
                    raise WebSocketError(
                        WSCloseCode.PROTOCOL_ERROR,
                        'Received frame with non-zero reserved bits')
                self._frame_fin = bool(fin)
                self._frame_opcode = opcode
                self._has_mask = bool(has_mask)
                self._payload_length_flag = length
                self._state = WSParserState.READ_PAYLOAD_LENGTH
            else:
                # Not enough data for the 2-byte header yet.
                break
        # read payload length (possibly extended to 16 or 64 bits)
        if self._state == WSParserState.READ_PAYLOAD_LENGTH:
            length = self._payload_length_flag
            if length == 126:
                # 16-bit extended payload length follows.
                if buf_length - start_pos >= 2:
                    data = buf[start_pos:start_pos + 2]
                    start_pos += 2
                    length = UNPACK_LEN2(data)[0]
                    self._payload_length = length
                    self._state = (
                        WSParserState.READ_PAYLOAD_MASK
                        if self._has_mask
                        else WSParserState.READ_PAYLOAD)
                else:
                    break
            elif length > 126:
                # 64-bit extended payload length follows.
                if buf_length - start_pos >= 8:
                    data = buf[start_pos:start_pos + 8]
                    start_pos += 8
                    length = UNPACK_LEN3(data)[0]
                    self._payload_length = length
                    self._state = (
                        WSParserState.READ_PAYLOAD_MASK
                        if self._has_mask
                        else WSParserState.READ_PAYLOAD)
                else:
                    break
            else:
                self._payload_length = length
                self._state = (
                    WSParserState.READ_PAYLOAD_MASK
                    if self._has_mask
                    else WSParserState.READ_PAYLOAD)
        # read payload mask (4 bytes, client-to-server frames only)
        if self._state == WSParserState.READ_PAYLOAD_MASK:
            if buf_length - start_pos >= 4:
                self._frame_mask = buf[start_pos:start_pos + 4]
                start_pos += 4
                self._state = WSParserState.READ_PAYLOAD
            else:
                break
        if self._state == WSParserState.READ_PAYLOAD:
            length = self._payload_length
            payload = self._frame_payload
            chunk_len = buf_length - start_pos
            if length >= chunk_len:
                # Frame extends past this buffer: consume everything and
                # wait for more data.
                self._payload_length = length - chunk_len
                payload.extend(buf[start_pos:])
                start_pos = buf_length
            else:
                self._payload_length = 0
                payload.extend(buf[start_pos:start_pos + length])
                start_pos = start_pos + length
            if self._payload_length == 0:
                if self._has_mask:
                    assert self._frame_mask is not None
                    # Unmask the payload in place.
                    _websocket_mask(self._frame_mask, payload)
                frames.append((
                    self._frame_fin,
                    self._frame_opcode,
                    payload,
                    self._compressed))
                self._frame_payload = bytearray()
                self._state = WSParserState.READ_HEADER
            else:
                break
    # Stash unconsumed bytes for the next call.
    self._tail = buf[start_pos:]
    return frames
def partitioning_type(self):
    """Union[str, None]: Time partitioning type of the table if it is
    partitioned (Defaults to :data:`None`)."""
    # Deprecated accessor kept for backward compatibility.
    warnings.warn(
        "This method will be deprecated in future versions. Please use "
        "TableListItem.time_partitioning.type_ instead.",
        PendingDeprecationWarning,
        stacklevel=2,
    )
    partitioning = self.time_partitioning
    if partitioning is None:
        return None
    return partitioning.type_
def is_whitelisted(self, addrinfo):
    """Return whether a ``socket.getaddrinfo`` result entry is in the
    socket address whitelist."""
    # An addrinfo entry is (family, socktype, proto, canonname, sockaddr);
    # sockaddr begins with (address, port) for both IPv4 and IPv6.
    # See http://docs.python.org/library/socket.html#socket.getaddrinfo
    sockaddr = addrinfo[4]
    address = sockaddr[0]
    return address in self.socket_address_whitelist
def minimum(self):
    """Return the minimum value for this ruler.

    If the cached value is None, a default is computed from the ruler
    type: 0 for numbers, the current date/datetime/time for temporal
    rulers, or the first notch for anything else.

    :return     <variant>
    """
    if self._minimum is not None:
        return self._minimum

    rtype = self.rulerType()
    # Dispatch table mapping each ruler type to its default factory.
    defaults = {
        XChartRuler.Type.Number: lambda: 0,
        XChartRuler.Type.Date: QDate.currentDate,
        XChartRuler.Type.Datetime: QDateTime.currentDateTime,
        XChartRuler.Type.Time: lambda: QDateTime.currentDateTime().time(),
    }
    factory = defaults.get(rtype)
    if factory is not None:
        self._minimum = factory()
    else:
        notches = self.notches()
        if notches:
            self._minimum = notches[0]
    return self._minimum
def _no_auto_update_getter ( self ) :
""": class : ` bool ` . Boolean controlling whether the : meth : ` start _ update `
method is automatically called by the : meth : ` update ` method
Examples
You can disable the automatic update via
> > > with data . no _ auto _ update :
. . . data . update ( time = 1)
. . . data . start _ update ( )
To permanently disable the automatic update , simply set
> > > data . no _ auto _ update = True
> > > data . update ( time = 1)
> > > data . no _ auto _ update = False # reenable automatical update""" | if getattr ( self , '_no_auto_update' , None ) is not None :
return self . _no_auto_update
else :
self . _no_auto_update = utils . _TempBool ( )
return self . _no_auto_update |
def check_member_state(self):
    """Verify that all RS members have an acceptable state.

    Returns True when every replica-set member reports a healthy state,
    False when any member is in a bad state or the primary is unreachable.
    """
    # NOTE(review): these look like the MongoDB member states STARTUP(0),
    # RECOVERING(3), FATAL(4), STARTUP2(5), UNKNOWN(6), ROLLBACK(9) --
    # confirm against the server version in use.
    bad_states = (0, 3, 4, 5, 6, 9)
    try:
        rs_status = self.run_command('replSetGetStatus')
        bad_members = [member for member in rs_status['members'] if member['state'] in bad_states]
        if bad_members:
            return False
    except pymongo.errors.AutoReconnect:
        # catch 'No replica set primary available' Exception
        return False
    logger.debug("all members in correct state")
    return True
def find_token_type(self, request):
    """Token type identification.

    RFC 6749 does not provide a method for easily differentiating between
    different token types during protected resource access. We estimate
    the most likely token type (if any) by asking each known token type
    to give an estimation based on the request.

    :param request: the incoming protected-resource request.
    :returns: name of the most likely token type, or None when no token
        types are registered.
    """
    # Sort (estimate, name) pairs descending so the highest estimate --
    # ties broken by name -- comes first.
    estimates = sorted(
        ((t.estimate_type(request), n) for n, t in self.tokens.items()),
        reverse=True,
    )
    # Truthiness instead of ``len(estimates)`` (lint: PEP 8 empty-sequence test).
    return estimates[0][1] if estimates else None
def _apply_with_random_selector(x, func, num_cases):
    """Computes func(x, sel), with sel sampled from [0...num_cases-1].

    Args:
        x: input Tensor.
        func: Python function to apply.
        num_cases: Python int32, number of cases to sample sel from.

    Returns:
        The result of func(x, sel), where func receives the value of the
        selector as a python integer, but sel is sampled dynamically.
    """
    # Draw the case selector uniformly at graph-execution time.
    sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
    # Pass the real x only to one of the func calls: switch() routes x to
    # the branch whose case matches ``sel``; merge() picks whichever branch
    # actually produced an output.
    return control_flow_ops.merge([func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) for case in range(num_cases)])[0]
def _check_stringify_year_row(self, row_index):
    '''Checks the given row to see if it is labeled year data and fills any blank years within that
    data.'''
    table_row = self.table[row_index]
    # State trackers
    prior_year = None
    # Scan the data columns between the start and end markers.
    for column_index in range(self.start[1] + 1, self.end[1]):
        current_year = table_row[column_index]
        # Quit as soon as an entry fails the year-consistency check:
        # the row is then not (or no longer) year data.
        if not self._check_years(current_year, prior_year):
            return
        # Only copy when we see a non-empty entry
        if current_year:
            prior_year = current_year
    # If we have a title of years, convert them to strings
    self._stringify_row(row_index)
def is_mag_data(mdat):
    '''is_mag_data(dat) yields True if the given data is a valid set of magnification data and
    False otherwise.

    Note that this does not return True for all valid return values of the mag_data() function:
    specifically, if the mag_data() function yields a list of mag-data maps or a lazy-map of the
    mag-data maps split out by visual area, then this will return False. This function only
    returns True for a map of mag data itself.'''
    required_keys = ('surface_coordinates', 'visual_coordinates', 'mesh',
                     'submesh', 'mask', 'retinotopy_data', 'masked_data',
                     'surface_areas', 'visual_areas')
    return pimms.is_map(mdat) and all(key in mdat for key in required_keys)
def _filter_startswith(self, term, field_name, field_type, is_not):
    """Return a startswith query on the un-stemmed term.

    Assumes ``term`` is not a list.
    """
    if field_type == 'text':
        words = term.split()
        if len(words) == 1:
            # Single word: wildcard query on the un-stemmed form.
            query = self.backend.parse_query('^ %s*' % term)
        else:
            # Multi-word: anchor the phrase at the field start.
            anchored = '^ %s' % term
            query = self._phrase_query(anchored.split(), field_name, field_type)
    else:
        query = self.backend.parse_query('^%s*' % term)
    if is_not:
        return xapian.Query(xapian.Query.OP_AND_NOT, self._all_query(), query)
    return query
def search(self, search_string):
    """Searches for a given string through the resources' labels.

    :param search_string: text matched against rdfs:label values.
    :return: list of (label, resource) tuples, where each resource is an
        instance of `HucitAuthor` or `HucitWork`.
    """
    # NOTE(review/security): ``search_string`` is interpolated directly into
    # the SPARQL query; a value containing quotes can break or alter the
    # query. Consider escaping or parameterizing it.
    query = """
    SELECT ?s ?label ?type
    WHERE {
        ?s a ?type .
        ?s rdfs:label ?label .
        ?label bif:contains "'%s'" .
    }
    """ % search_string
    response = self._session.default_store.execute_sparql(query)
    results = [(result['s']['value'], result['label']['value'], result['type']['value']) for result in response["results"]["bindings"]]
    # Materialize each hit as a surf resource of its reported rdf type.
    resources = [(label, self._session.get_resource(subject, self._session.get_class(type))) for subject, label, type in results]
    Name = self._session.get_class(surf.ns.EFRBROO['F12_Name'])
    Title = self._session.get_class(surf.ns.EFRBROO['E35_Title'])
    Work = self._session.get_class(surf.ns.EFRBROO['F1_Work'])
    Person = self._session.get_class(surf.ns.EFRBROO['F10_Person'])
    result = []
    for label, resource in resources:
        # Map each matched label resource back to its owning work/author.
        if resource.uri == surf.ns.EFRBROO['E35_Title']:
            work = Work.get_by(efrbroo_P102_has_title=resource).first()
            result.append((label, work))
        elif resource.uri == surf.ns.EFRBROO['F12_Name']:
            author = Person.get_by(ecrm_P1_is_identified_by=resource).first()
            result.append((label, author))
        elif resource.uri == surf.ns.ECRM['E41_Appellation']:
            # An appellation may be an alternative form of either a person
            # name or a work title: try the name first, fall back to title.
            try:
                name = Name.get_by(ecrm_P139_has_alternative_form=resource).first()
                assert name is not None
                author = Person.get_by(ecrm_P1_is_identified_by=name).first()
                result.append((label, author))
            except Exception as e:
                title = Title.get_by(ecrm_P139_has_alternative_form=resource).first()
                assert title is not None
                work = Work.get_by(efrbroo_P102_has_title=title).first()
                result.append((label, work))
    return result
async def _maybe_release_last_part ( self ) -> None :
"""Ensures that the last read body part is read completely .""" | if self . _last_part is not None :
if not self . _last_part . at_eof ( ) :
await self . _last_part . release ( )
self . _unread . extend ( self . _last_part . _unread )
self . _last_part = None |
def draw_image(self, img_filename: str, x: float, y: float, w: float, h: float) -> None:
    """Draws the given image at position (x, y) with width w and height h.

    No-op in this implementation; presumably meant to be overridden by
    renderers that support images -- confirm against subclasses.
    """
    pass
def restoreSettings(self, settings):
    """Restores the settings for this logger from the inputed settings.

    :param      settings | <QtCore.QSettings>
    """
    val = unwrapVariant(settings.value('format'))
    if val:
        self.setFormatText(val)
    # Active levels are stored as a comma-separated list of ints.
    levels = unwrapVariant(settings.value('levels'))
    if levels:
        # NOTE(review): in Python 3 ``map`` returns a lazy iterator; confirm
        # setActiveLevels materializes it (or wrap in list()).
        self.setActiveLevels(map(int, levels.split(',')))
    # Per-logger levels are stored as comma-separated "logger:level" pairs.
    logger_levels = unwrapVariant(settings.value('loggerLevels'))
    if logger_levels:
        for key in logger_levels.split(','):
            logger, lvl = key.split(':')
            lvl = int(lvl)
            self.setLoggerLevel(logger, lvl)
def put_everything_together(self):
    """Assemble the final SVG file.

    All of the elements are concatenated in the correct z-order (e.g.
    lines are placed behind plots and the molecule).
    """
    parts = [
        self.filestart,
        self.white_circles,
        self.draw_molecule,
        self.draw.draw_hbonds,
        self.draw.draw_pi_lines,
        self.draw.draw_saltbridges,
        self.draw.cloud,
        self.draw_plots,
        self.end_symbol,
    ]
    self.final_molecule = "".join(str(part) for part in parts)
def set_artefact_path(self, path_to_zip_file):
    """Set the route to the local file to deploy.

    :param path_to_zip_file: path of the zip artefact on disk.
    :return: dict with the artefact bytes under the 'ZipFile' key.
    """
    deploy_conf = self.config["deploy"]
    deploy_conf["deploy_file"] = path_to_zip_file
    return {'ZipFile': self.build.read(deploy_conf["deploy_file"])}
def getopt(self, p, default=None):
    """Return the first stored option value matching ``p``, or ``default``."""
    matches = (value for key, value in self.pairs if key == p)
    return next(matches, default)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None, partition_column=None, lower_bound=None, upper_bound=None, max_sessions=None, ):
    """Read SQL query or database table into a DataFrame.

    Args:
        sql: string or SQLAlchemy Selectable (select or text object) SQL query to be executed or a table name.
        con: SQLAlchemy connectable (engine/connection) or database string URI or DBAPI2 connection (fallback mode)
        index_col: Column(s) to set as index (MultiIndex).
        coerce_float: Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to
            floating point, useful for SQL result sets.
        params: List of parameters to pass to execute method. The syntax used
            to pass parameters is database driver dependent. Check your
            database driver documentation for which of the five syntax styles,
            described in PEP 249's paramstyle, is supported.
        parse_dates:
            - List of column names to parse as dates.
            - Dict of ``{column_name: format string}`` where format string is
              strftime compatible in case of parsing string times, or is one of
              (D, s, ns, ms, us) in case of parsing integer timestamps.
            - Dict of ``{column_name: arg dict}``, where the arg dict corresponds
              to the keyword arguments of :func:`pandas.to_datetime`.
              Especially useful with databases without native Datetime support,
              such as SQLite.
        columns: List of column names to select from SQL table (only used when reading a table).
        chunksize: If specified, return an iterator where `chunksize` is the number of rows to include in each chunk.
        partition_column: column used to share the data between the workers (MUST be a INTEGER column)
        lower_bound: the minimum value to be requested from the partition_column
        upper_bound: the maximum value to be requested from the partition_column
        max_sessions: the maximum number of simultaneous connections allowed to use

    Returns:
        Pandas Dataframe
    """
    # Capture every named argument of this call as a dict via frame
    # introspection, then forward them all to the factory.
    # NOTE(review): fragile -- introducing any local variable before this
    # line would change the result of getargvalues(); confirm before edits.
    _, _, _, kwargs = inspect.getargvalues(inspect.currentframe())
    return DataFrame(query_compiler=ExperimentalBaseFactory.read_sql(**kwargs))
def clenshaw(a, alpha, beta, t):
    """Clenshaw's algorithm for evaluating

        S(t) = \\sum a_k P_k(alpha, beta)(t)

    where P_k(alpha, beta) is the kth orthogonal polynomial defined by the
    recurrence coefficients alpha, beta.

    See <https://en.wikipedia.org/wiki/Clenshaw_algorithm> for details.
    """
    n = len(alpha)
    assert len(beta) == n
    assert len(a) == n + 1
    # NOTE(review): the closing expression reads b[2] and beta[1], so this
    # implicitly assumes n >= 2 (n == 1 would even overwrite b[0] below) --
    # confirm callers guarantee this.
    # Allocate the recurrence workspace; t may be a scalar or an ndarray.
    try:
        b = numpy.empty((n + 1,) + t.shape)
    except AttributeError:  # 'float' object has no attribute 'shape'
        b = numpy.empty(n + 1)
    # b[0] is unused, can be any value
    # TODO shift the array
    b[0] = 1.0
    # Backward recurrence: b[k] = a[k] + (t - alpha[k]) b[k+1] - beta[k+1] b[k+2]
    b[n] = a[n]
    b[n - 1] = a[n - 1] + (t - alpha[n - 1]) * b[n]
    for k in range(n - 2, 0, -1):
        b[k] = a[k] + (t - alpha[k]) * b[k + 1] - beta[k + 1] * b[k + 2]
    # Fold in the first two basis polynomials explicitly.
    phi0 = 1.0
    phi1 = t - alpha[0]
    return phi0 * a[0] + phi1 * b[1] - beta[1] * phi0 * b[2]
def __get_request(self, host, soup):
    """Build a request from the given soup form.

    Args:
        host str: The URL of the current queue item.
        soup (obj): The BeautifulSoup form.

    Returns:
        :class:`nyawc.http.Request`: The new Request.
    """
    if soup.has_attr("action"):
        url = URLHelper.make_absolute(host, self.__trim_grave_accent(soup["action"]))
    else:
        url = host
    raw_method = soup["method"] if soup.has_attr("method") else "get"
    # Anything other than POST is treated as GET.
    method = "post" if raw_method.lower() == "post" else "get"
    form_data = self.__get_form_data(soup)
    return Request(url, method, form_data)
def get_file(self, fax_id, **kwargs):  # noqa: E501
    """get a file  # noqa: E501

    Get your fax archive file using it's id.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_file(fax_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str fax_id: (required)
    :param str format: can be 'pdf' or 'tiff'
    :return: file
        If the method is called asynchronously,
        returns the request thread.
    """
    # NOTE(review): 'async' became a reserved keyword in Python 3.7, so
    # callers cannot literally write ``async=True``; it has to be passed as
    # ``**{'async': True}``. Consider renaming the flag (e.g. async_req).
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous mode: return the request thread immediately.
        return self.get_file_with_http_info(fax_id, **kwargs)
        # noqa: E501
    else:
        # Synchronous mode: unwrap and return the response data.
        (data) = self.get_file_with_http_info(fax_id, **kwargs)
        # noqa: E501
        return data
def _sync_table(self, columns):
    """Lazy load, create or adapt the table structure in the database.

    :param columns: iterable of SQLAlchemy ``Column`` objects that must
        exist on the table after this call.
    """
    if self._table is None:
        # Load an existing table from the database.
        self._reflect_table()
    if self._table is None:
        # Table does not exist yet: create it with an initial set of columns.
        if not self._auto_create:
            raise DatasetException("Table does not exist: %s" % self.name)
        # Keep the lock scope small because this is run very often.
        with self.db.lock:
            self._threading_warn()
            self._table = SQLATable(self.name, self.db.metadata, schema=self.db.schema)
            if self._primary_id is not False:
                # This can go wrong on DBMS like MySQL and SQLite where
                # tables cannot have no columns.
                primary_id = self._primary_id or self.PRIMARY_DEFAULT
                primary_type = self._primary_type or Types.integer
                # Auto-increment only applies to integer primary keys.
                increment = primary_type in [Types.integer, Types.bigint]
                column = Column(primary_id, primary_type, primary_key=True, autoincrement=increment)
                self._table.append_column(column)
            for column in columns:
                if not column.name == self._primary_id:
                    self._table.append_column(column)
            self._table.create(self.db.executable, checkfirst=True)
    elif len(columns):
        # Table exists: add any requested columns that are missing, then
        # refresh the reflected schema.
        with self.db.lock:
            self._reflect_table()
            self._threading_warn()
            for column in columns:
                if not self.has_column(column.name):
                    self.db.op.add_column(self.name, column, self.db.schema)
            self._reflect_table()
def crosscorr(self, signal, lag=0):
    """Cross correlate series data against another signal.

    Parameters
    ----------
    signal : array
        Signal to correlate against (must be 1D).
    lag : int
        Range of lags to consider, will cover (-lag, +lag).

    Returns
    -------
    Result of ``self.map`` applied to each record: for each record, the
    normalized dot product against the (possibly lag-shifted) signal.
    """
    from scipy.linalg import norm
    # Mean-center and normalize the reference signal.
    s = asarray(signal)
    s = s - mean(s)
    s = s / norm(s)
    if size(s) != size(self.index):
        raise Exception('Size of signal to cross correlate with, %g, '
                        'does not match size of series' % size(s))
    # BUG FIX: was `if lag is not 0` -- identity comparison against an int
    # literal; it only worked because CPython caches small ints and it emits
    # a SyntaxWarning on Python >= 3.8. Use a value comparison instead.
    if lag != 0:
        # Build a matrix whose rows are lagged copies of the signal; the
        # samples wrapped around by roll() are zeroed out.
        shifts = range(-lag, lag + 1)
        d = len(s)
        m = len(shifts)
        sshifted = zeros((m, d))
        for i, shift in enumerate(shifts):
            tmp = roll(s, shift)
            if shift < 0:
                tmp[(d + shift):] = 0
            if shift > 0:
                tmp[:shift] = 0
            sshifted[i, :] = tmp
        s = sshifted
    else:
        shifts = [0]

    def get(y, s):
        # Normalized correlation of one record against the signal matrix;
        # a zero-variance record yields all-zero correlations.
        y = y - mean(y)
        n = norm(y)
        if n == 0:
            b = zeros((s.shape[0],))
        else:
            y /= n
            b = dot(s, y)
        return b

    return self.map(lambda x: get(x, s), index=shifts)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None, columns=None, header=True, index=True, index_label=None, mode='w', encoding=None, compression='infer', quoting=None, quotechar='"', line_terminator=None, chunksize=None, tupleize_cols=None, date_format=None, doublequote=True, escapechar=None, decimal='.'):
    r"""Write object to a comma-separated values (csv) file.

    .. versionchanged:: 0.24.0
        The order of arguments for Series was changed.

    Parameters
    ----------
    path_or_buf : str or file handle, default None
        File path or object; if None the result is returned as a string.
        A file object should be opened with ``newline=''``.
    sep : str, default ','
        Field delimiter (single character).
    na_rep : str, default ''
        Missing data representation.
    float_format : str, default None
        Format string for floating point numbers.
    columns : sequence, optional
        Columns to write.
    header : bool or list of str, default True
        Write out the column names; a list is taken as column aliases.
    index : bool, default True
        Write row names (index).
    index_label : str or sequence, or False, default None
        Column label(s) for the index column(s); False suppresses them.
    mode : str
        Python write mode, default 'w'.
    encoding : str, optional
        Output file encoding, defaults to 'utf-8'.
    compression : str, default 'infer'
        One of {'infer', 'gzip', 'bz2', 'zip', 'xz', None}; 'infer'
        detects from the file extension.
    quoting : optional constant from csv module
        Defaults to ``csv.QUOTE_MINIMAL``.
    quotechar : str, default '\"'
        Character used to quote fields.
    line_terminator : str, optional
        Newline sequence for the output file; defaults to ``os.linesep``.
    chunksize : int or None
        Rows to write at a time.
    tupleize_cols : bool, default False
        .. deprecated:: 0.21.0
            MultiIndex columns will always be written as separate rows.
    date_format : str, default None
        Format string for datetime objects.
    doublequote : bool, default True
        Control quoting of ``quotechar`` inside a field.
    escapechar : str, default None
        Character used to escape ``sep`` and ``quotechar`` when needed.
    decimal : str, default '.'
        Character recognized as decimal separator.

    Returns
    -------
    None or str
        The csv output as a string when ``path_or_buf`` is None,
        otherwise None.

    See Also
    --------
    read_csv : Load a CSV file into a DataFrame.
    to_excel : Write DataFrame to an Excel file.
    """
    # Route Series through a one-column frame so a single code path
    # serves both DataFrame and Series.
    frame = self if isinstance(self, ABCDataFrame) else self.to_frame()

    if tupleize_cols is None:
        tupleize_cols = False
    else:
        warnings.warn("The 'tupleize_cols' parameter is deprecated and "
                      "will be removed in a future version",
                      FutureWarning, stacklevel=2)

    from pandas.io.formats.csvs import CSVFormatter
    writer = CSVFormatter(frame, path_or_buf,
                          line_terminator=line_terminator, sep=sep,
                          encoding=encoding, compression=compression,
                          quoting=quoting, na_rep=na_rep,
                          float_format=float_format, cols=columns,
                          header=header, index=index,
                          index_label=index_label, mode=mode,
                          chunksize=chunksize, quotechar=quotechar,
                          tupleize_cols=tupleize_cols,
                          date_format=date_format,
                          doublequote=doublequote,
                          escapechar=escapechar, decimal=decimal)
    writer.save()

    # No destination given: hand the rendered CSV text back to the caller.
    if path_or_buf is None:
        return writer.path_or_buf.getvalue()
def bytes(self):
    """Return the provided data as a bytes object.

    When the object was constructed from a filename the file is read
    from disk on every call; otherwise the in-memory bytes are returned.
    """
    if not self._filename:
        return self._bytes
    with open(self._filename, "rb") as handle:
        return handle.read()
def request(self, url, method='GET', params=None, data=None, expected_response_code=200, headers=None):
    """Make a HTTP request to the InfluxDB API.

    :param url: the path of the HTTP request, e.g. write, query, etc.
    :type url: str
    :param method: the HTTP method for the request, defaults to GET
    :type method: str
    :param params: additional parameters for the request, defaults to None
    :type params: dict
    :param data: the data of the request, defaults to None
    :type data: str
    :param expected_response_code: the expected response code of
        the request, defaults to 200
    :type expected_response_code: int
    :param headers: headers to add to the request
    :type headers: dict
    :returns: the response from the request
    :rtype: :class:`requests.Response`
    :raises InfluxDBServerError: if the response code is any server error
        code (5xx)
    :raises InfluxDBClientError: if the response code is not the
        same as `expected_response_code` and is not a server error code
    """
    url = "{0}/{1}".format(self._baseurl, url)

    if headers is None:
        headers = self._headers

    if params is None:
        params = {}

    # Dict/list payloads are serialized to JSON before sending.
    if isinstance(data, (dict, list)):
        data = json.dumps(data)

    # Try to send the request more than once by default (see #103).
    # NOTE(review): when self._retries == 0 the `retry` flag is never
    # updated, i.e. the request is retried indefinitely until it succeeds
    # -- this appears intentional ("0 means retry forever"), confirm.
    retry = True
    _try = 0
    while retry:
        try:
            response = self._session.request(method=method, url=url, auth=(self._username, self._password), params=params, data=data, headers=headers, proxies=self._proxies, verify=self._verify_ssl, timeout=self._timeout)
            break
        except (requests.exceptions.ConnectionError, requests.exceptions.HTTPError, requests.exceptions.Timeout):
            _try += 1
            if self._retries != 0:
                retry = _try < self._retries
            # Exponential backoff with jitter, applied to POST only
            # (presumably writes; GETs are retried immediately).
            if method == "POST":
                time.sleep((2 ** _try) * random.random() / 100.0)
            if not retry:
                # Re-raise the last connection/timeout error.
                raise
    # if there's not an error, there must have been a successful response
    if 500 <= response.status_code < 600:
        raise InfluxDBServerError(response.content)
    elif response.status_code == expected_response_code:
        return response
    else:
        raise InfluxDBClientError(response.content, response.status_code)
def make_requests_session():
    """Build a :class:`requests.Session` tagged with this package's version.

    :returns: requests session
    :rtype: :class:`requests.Session`
    """
    session = requests.Session()
    # Prepend "python-steam/<version>" to the default User-Agent string.
    steam_version = __import__('steam').__version__
    session.headers['User-Agent'] = "python-steam/{0} {1}".format(
        steam_version, session.headers['User-Agent'])
    return session
def fetch_html(self, msg_nums):
    """Fetch the text/html content of a message found via imap_search.

    @Params
    msg_nums - message number to get html message for

    @Returns
    HTML content of message matched by message number
    """
    # Guard against a missing/empty message number before hitting IMAP.
    if msg_nums:
        return self.__imap_fetch_content_type(msg_nums, self.HTML)
    raise Exception("Invalid Message Number!")
def dataset_generator(filepath, dataset, chunk_size=1, start_idx=None, end_idx=None):
    """Yield example dicts for ``dataset`` read from an HDF5 file.

    Reads the aligned ``<dataset>_in`` / ``<dataset>_na`` / ``<dataset>_out``
    datasets from ``filepath`` and encodes each row with a DNAEncoder.
    """
    encoder = dna_encoder.DNAEncoder(chunk_size=chunk_size)
    with h5py.File(filepath, "r") as h5_file:
        # Input, mask and output datasets for this split.
        inp_data = h5_file["%s_in" % dataset]
        mask_data = h5_file["%s_na" % dataset]
        out_data = h5_file["%s_out" % dataset]
        # All three datasets must have the same number of rows.
        assert inp_data.len() == mask_data.len() == out_data.len()
        lo = 0 if start_idx is None else start_idx
        hi = inp_data.len() if end_idx is None else end_idx
        for i in range(lo, hi):
            if i % 100 == 0:
                print("Generating example %d for %s" % (i, dataset))
            ex_dict = to_example_dict(encoder, inp_data[i], mask_data[i], out_data[i])
            # Original data has one output for every 128 input bases. Ensure
            # the ratio has been maintained given the chunk size and
            # removing EOS.
            assert (len(ex_dict["inputs"]) - 1) == ((128 // chunk_size) * ex_dict["targets_shape"][0])
            yield ex_dict
def offset(self, value):
    """Skip ``value`` results in the query (useful for pagination).

    Returns ``self`` so calls can be chained.
    """
    skipped_query = self._query.skip(value)
    self._query = skipped_query
    return self
def receive(self, data):
    """receive(data) -> List of decoded messages.

    Processes :obj:`data` (a bytes-like object) and returns a (possibly
    empty) list of :class:`bytes` objects, each a decoded message.
    Non-terminated SLIP packets are buffered and handled on the next call.

    :param bytes data: The bytes-like object to be processed.
        An empty :obj:`data` parameter forces the internal
        buffer to be flushed and decoded.
    :return: A (possibly empty) list of decoded messages.
    :rtype: list(bytes)
    :raises ProtocolError: An invalid byte sequence has been detected.
    """
    # Empty data signals end of reception: append an END byte so whatever
    # is buffered forms a complete message.
    chunk = data if data else END
    self._recv_buffer += chunk

    # Leading END bytes would only produce empty packets -- drop them.
    buffered = self._recv_buffer.lstrip(END)
    if buffered:
        # Split on runs of one or more END bytes. The final element is a
        # possibly incomplete packet and becomes the new receive buffer
        # (it is empty when the buffer ended on an END byte).
        *complete, tail = re.split(END + b'+', buffered)
        self._packets.extend(complete)
        self._recv_buffer = tail
    else:
        self._recv_buffer = buffered

    # Decode and return whatever packets are now complete.
    return self.flush()
def bs_plot_data(self, zero_to_efermi=True):
    """Get the band-structure data nicely formatted for a plot.

    Args:
        zero_to_efermi: Automatically subtract off the Fermi energy from
            the eigenvalues and plot.

    Returns:
        dict: A dictionary of the following format:
            ticks: A dict with the 'distances' at which there is a kpoint
                (the x axis) and the labels (None if no label).
            energy: A dict storing bands for spin up and spin down data
                [{Spin:[band_index][k_point_index]}] as a list (one element
                for each branch) of energy for each kpoint. The data is
                stored by branch to facilitate the plotting.
            vbm: A list of tuples (distance, energy) marking the vbms. The
                energies are shifted with respect to the fermi level if the
                option has been selected.
            cbm: A list of tuples (distance, energy) marking the cbms. The
                energies are shifted with respect to the fermi level if the
                option has been selected.
            lattice: The reciprocal lattice.
            zero_energy: This is the energy used as zero for the plot.
            band_gap: A string indicating the band gap and its nature
                (empty if it's a metal).
            is_metal: True if the band structure is metallic (i.e., there
                is at least one band crossing the fermi level).
    """
    distance = []
    energy = []
    # Reference energy: Fermi level for metals, VBM otherwise; disabled
    # entirely when zero_to_efermi is False.
    if self._bs.is_metal():
        zero_energy = self._bs.efermi
    else:
        zero_energy = self._bs.get_vbm()['energy']
    if not zero_to_efermi:
        zero_energy = 0.0
    for b in self._bs.branches:
        if self._bs.is_spin_polarized:
            energy.append({str(Spin.up): [], str(Spin.down): []})
        else:
            energy.append({str(Spin.up): []})
        distance.append([self._bs.distance[j] for j in range(b['start_index'], b['end_index'] + 1)])
        # NOTE(review): `ticks` is (re)computed on every branch iteration and
        # only the last value is used in the return dict; it also appears to
        # be undefined when there are no branches -- confirm intended.
        ticks = self.get_ticks()

        # Shifted eigenvalues for this branch, one list per band.
        for i in range(self._nb_bands):
            energy[-1][str(Spin.up)].append([self._bs.bands[Spin.up][i][j] - zero_energy for j in range(b['start_index'], b['end_index'] + 1)])
        if self._bs.is_spin_polarized:
            for i in range(self._nb_bands):
                energy[-1][str(Spin.down)].append([self._bs.bands[Spin.down][i][j] - zero_energy for j in range(b['start_index'], b['end_index'] + 1)])

    # Band-edge markers as (distance, energy) tuples; there may be several
    # equivalent k-points for each edge.
    vbm = self._bs.get_vbm()
    cbm = self._bs.get_cbm()
    vbm_plot = []
    cbm_plot = []
    for index in cbm['kpoint_index']:
        cbm_plot.append((self._bs.distance[index], cbm['energy'] - zero_energy if zero_to_efermi else cbm['energy']))
    for index in vbm['kpoint_index']:
        vbm_plot.append((self._bs.distance[index], vbm['energy'] - zero_energy if zero_to_efermi else vbm['energy']))

    bg = self._bs.get_band_gap()
    direct = "Indirect"
    if bg['direct']:
        direct = "Direct"

    return {'ticks': ticks, 'distances': distance, 'energy': energy, 'vbm': vbm_plot, 'cbm': cbm_plot, 'lattice': self._bs.lattice_rec.as_dict(), 'zero_energy': zero_energy, 'is_metal': self._bs.is_metal(), 'band_gap': "{} {} bandgap = {}".format(direct, bg['transition'], bg['energy']) if not self._bs.is_metal() else ""}
def getexptimeimg(self, chip):
    """Return an array representing the exposure time per pixel for the
    detector chip.

    This method will be overloaded for IR detectors which have their own
    EXP arrays, namely, WFC3/IR and NICMOS images.

    :units: None

    Returns
    -------
    exptimeimg : numpy array
        An array of the same shape as the image.
    """
    sci_chip = self._image[self.scienceExt, chip]
    exptime = sci_chip._exptime
    # The 'expsq' weighting scheme squares the exposure time.
    scale = exptime * exptime if sci_chip._wtscl_par == 'expsq' else exptime
    return scale * np.ones(sci_chip.image_shape, dtype=sci_chip.image_dtype)
def listwrap(value):
    """Normalize a parameter to a wrapped list.

    PERFORMS THE FOLLOWING TRANSLATION:
        None  -> []
        value -> [value]
        [...] -> [...] (unchanged list)

    Lets callers pass a single value, a list of values, or null without
    the callee having to distinguish the three cases.
    """
    # NOTE: `== None` (not `is None`) is deliberate -- the library's Null
    # sentinel compares equal to None without being None, and must also
    # translate to an empty list.
    if value == None:
        return FlatList()
    if is_list(value):
        return wrap(value)
    if isinstance(value, set):
        return wrap(list(value))
    return wrap([unwrap(value)])
def main(arguments=None):
    """*The main function used when ``cl_utils.py`` is run as a single
    script from the cl, or when installed as a cl command*

    NOTE(review): this function is Python-2-only (`iteritems`, `unicode`)
    and materializes local variables from the parsed CLI arguments via
    `exec`, so names like `url`, `pdf`, `init`, ... are created at runtime
    and are invisible to static analysis.
    """
    # setup the command-line util settings
    su = tools(arguments=arguments, docString=__doc__, logLevel="WARNING", options_first=False, projectName="polyglot")
    arguments, settings, log, dbConn = su.setup()

    # unpack remaining cl arguments using `exec` to setup the variable names
    # automatically (e.g. "--clean" -> cleanFlag, "<url>" -> url)
    for arg, val in arguments.iteritems():
        if arg[0] == "-":
            varname = arg.replace("-", "") + "Flag"
        else:
            varname = arg.replace("<", "").replace(">", "")
        if isinstance(val, str) or isinstance(val, unicode):
            exec(varname + " = '%s'" % (val,))
        else:
            exec(varname + " = %s" % (val,))
        if arg == "--dbConn":
            dbConn = val
        log.debug('%s = %s' % (varname, val,))

    ## START LOGGING ##
    startTime = times.get_now_sql_datetime()
    log.info('--- STARTING TO RUN THE cl_utils.py AT %s' % (startTime,))

    # Default the output folder to the cwd and normalize the flags.
    if not destinationFolder:
        destinationFolder = os.getcwd()
    if not filenameFlag:
        filenameFlag = False
    if not cleanFlag:
        readability = False
    else:
        readability = True

    # "init": open the settings file in the default editor. Both the macOS
    # ("open") and Windows ("start") commands are attempted best-effort.
    if init:
        from os.path import expanduser
        home = expanduser("~")
        filepath = home + "/.config/polyglot/polyglot.yaml"
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass

    # Print a web page to PDF.
    if pdf and url:
        filepath = printpdf.printpdf(log=log, settings=settings, url=url, folderpath=destinationFolder, title=filenameFlag, append=False, readability=readability).get()

    # Download and clean a web page to HTML.
    if html and url:
        cleaner = htmlCleaner.htmlCleaner(log=log, settings=settings, url=url, outputDirectory=destinationFolder, title=filenameFlag,  # SET TO FALSE TO USE WEBPAGE TITLE,
            style=cleanFlag,  # add polyglot's styling to the HTML document
            metadata=True,  # include metadata in generated HTML (e.g. title),
            h1=True  # include title as H1 at the top of the doc
        )
        filepath = cleaner.clean()

    # Generate an epub from either a URL or a docx file.
    if epub:
        if url:
            iinput = url
        else:
            iinput = docx
        from polyglot import ebook
        epub = ebook(log=log, settings=settings, urlOrPath=iinput, title=filenameFlag, bookFormat="epub", outputDirectory=destinationFolder)
        filepath = epub.get()

    # Generate a mobi from either a URL or a docx file.
    if mobi:
        if url:
            iinput = url
        else:
            iinput = docx
        from polyglot import ebook
        mobi = ebook(log=log, settings=settings, urlOrPath=iinput, title=filenameFlag, bookFormat="mobi", outputDirectory=destinationFolder, )
        filepath = mobi.get()

    # Send content straight to a kindle device.
    if kindle:
        if url:
            iinput = url
        else:
            iinput = docx
        from polyglot import kindle
        sender = kindle(log=log, settings=settings, urlOrPath=iinput, title=filenameFlag)
        success = sender.send()

    # Convert a kindle notebook export to markdown.
    if kindleNB2MD:
        basename = os.path.basename(notebook)
        extension = os.path.splitext(basename)[1]
        filenameNoExtension = os.path.splitext(basename)[0]
        if destinationFolder:
            filepath = destinationFolder + "/" + filenameNoExtension + ".md"
        else:
            filepath = notebook.replace("." + extension, ".md")
        from polyglot.markdown import kindle_notebook
        nb = kindle_notebook(log=log, kindleExportPath=notebook, outputPath=filepath)
        nb.convert()

    # Optionally open the generated file (macOS "open" / Windows "start").
    if openFlag:
        try:
            cmd = """open %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass
        try:
            cmd = """start %(filepath)s""" % locals()
            p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
        except:
            pass

    if "dbConn" in locals() and dbConn:
        dbConn.commit()
        dbConn.close()

    ## FINISH LOGGING ##
    endTime = times.get_now_sql_datetime()
    runningTime = times.calculate_time_difference(startTime, endTime)
    log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' % (endTime, runningTime,))
    return
def asarray_ndim(a, *ndims, **kwargs):
    """Coerce `a` to a numpy array with an allowed number of dimensions.

    Parameters
    ----------
    a : array_like
    *ndims : int, optional
        Allowed values for number of dimensions.
    **kwargs
        ``allow_none`` (default False) permits ``a`` to be None; remaining
        keywords are passed through to :func:`numpy.array` (``copy``
        defaults to False).

    Returns
    -------
    a : numpy.ndarray

    Raises
    ------
    TypeError
        If the resulting array's ``ndim`` is not one of ``ndims``.
    """
    allow_none = kwargs.pop('allow_none', False)
    kwargs.setdefault('copy', False)
    if allow_none and a is None:
        return None
    arr = np.array(a, **kwargs)
    if arr.ndim in ndims:
        return arr
    # Build a readable description of the accepted dimensionalities.
    if len(ndims) > 1:
        expect_str = 'one of %s' % str(ndims)
    else:
        # noinspection PyUnresolvedReferences
        expect_str = '%s' % ndims[0]
    raise TypeError('bad number of dimensions: expected %s; found %s' % (expect_str, arr.ndim))
def cartesian_to_helicity(vector, numeric=False):
    r"""Take a vector from the cartesian basis to the helicity basis.

    The components are combined as

    .. math::

        a_{+1} = -\frac{a_x + i a_y}{\sqrt{2}}, \qquad
        a_0 = a_z, \qquad
        a_{-1} = \frac{a_x - i a_y}{\sqrt{2}}

    and returned in the order :math:`(a_{-1}, a_0, a_{+1})`. Note that
    vectors in the helicity basis are built in a weird way by convention:

    .. math::

        \vec{a} = -a_{+1}\vec{e}_{-1} + a_0 \vec{e}_0 - a_{-1}\vec{e}_{+1}

    :param vector: a length-3 cartesian vector; each component may itself
        be a sympy Matrix or numpy array (e.g. sets of operator matrices).
    :param numeric: when True, components are converted with numpy and the
        result is returned as a numpy array instead of sympy expressions.
    :returns: the helicity components; a sympy ``Matrix`` for scalar
        symbolic components, otherwise a list/array of the per-component
        objects.
    """
    if numeric:
        # Numpy path: coerce each cartesian component to an ndarray.
        vector = list(vector)
        vector[0] = nparray(vector[0])
        vector[1] = nparray(vector[1])
        vector[2] = nparray(vector[2])
        v = [(vector[0] - 1j * vector[1]) / npsqrt(2), vector[2], -(vector[0] + 1j * vector[1]) / npsqrt(2)]
        v = nparray(v)
    else:
        # Symbolic path: same combination using sympy's I and sqrt.
        v = [(vector[0] - I * vector[1]) / sqrt(2), vector[2], -(vector[0] + I * vector[1]) / sqrt(2)]
    # When the components are themselves matrices/arrays, return the raw
    # 3-element container; otherwise pack the scalars into a sympy Matrix.
    if type(vector[0]) in [type(Matrix([1, 0])), type(nparray([1, 0]))]:
        return v
    else:
        return Matrix(v)
def get_submission_archive(self, submissions, sub_folders, aggregations, archive_file=None):
    """Pack a set of submissions into a gzipped tar archive.

    :param submissions: a list of submissions
    :param sub_folders: possible values:
        []: put all submissions in /
        ['taskid']: put all submissions for each task in a different directory /taskid/
        ['username']: put all submissions for each user in a different directory /username/
        ['taskid', 'username']: /taskid/username/
        ['username', 'taskid']: /username/taskid/
    :param aggregations: mapping of username -> aggregation (classroom)
        info, used when 'aggregation' appears in ``sub_folders``.
    :param archive_file: optional file object to write into; a temporary
        file is created when omitted.
    :return: a file-like object containing a tgz archive of all the
        submissions, positioned at offset 0.
    """
    tmpfile = archive_file if archive_file is not None else tempfile.TemporaryFile()
    tar = tarfile.open(fileobj=tmpfile, mode='w:gz')
    for submission in submissions:
        submission = self.get_input_from_submission(submission)
        # The submission metadata itself is serialized to YAML.
        submission_yaml = io.BytesIO(inginious.common.custom_yaml.dump(submission).encode('utf-8'))
        # Considering multiple single submissions for each user
        for username in submission["username"]:
            # Compute base path in the tar file from the requested folders.
            base_path = "/"
            for sub_folder in sub_folders:
                if sub_folder == 'taskid':
                    base_path = submission['taskid'] + base_path
                elif sub_folder == 'username':
                    base_path = '_' + '-'.join(submission['username']) + base_path
                    base_path = base_path[1:]
                elif sub_folder == 'aggregation':
                    if username in aggregations:
                        if aggregations[username] is None:
                            # If classrooms are not used, and user is not
                            # grouped, his classroom is replaced by None.
                            base_path = '_' + '-'.join(submission['username']) + base_path
                            base_path = base_path[1:]
                        else:
                            base_path = (aggregations[username]["description"] + " (" + str(aggregations[username]["_id"]) + ")").replace(" ", "_") + base_path
                base_path = '/' + base_path
            base_path = base_path[1:]

            submission_yaml_fname = base_path + str(submission["_id"]) + '/submission.test'

            # Avoid putting two times the same submission on the same place.
            if submission_yaml_fname not in tar.getnames():
                info = tarfile.TarInfo(name=submission_yaml_fname)
                info.size = submission_yaml.getbuffer().nbytes
                info.mtime = time.mktime(submission["submitted_on"].timetuple())

                # Add file in tar archive
                tar.addfile(info, fileobj=submission_yaml)

                # If there is an archive (grading output), merge its members
                # under .../archive/ inside this submission's directory.
                if 'archive' in submission and submission['archive'] is not None and submission['archive'] != "":
                    subfile = self._gridfs.get(submission['archive'])
                    subtar = tarfile.open(fileobj=subfile, mode="r:gz")

                    for member in subtar.getmembers():
                        subtarfile = subtar.extractfile(member)
                        member.name = base_path + str(submission["_id"]) + "/archive/" + member.name
                        tar.addfile(member, subtarfile)

                    subtar.close()
                    subfile.close()

                # If there are files that were uploaded by the student, add them.
                if submission['input'] is not None:
                    for pid, problem in submission['input'].items():
                        # If problem is a dict, it is a file (from the
                        # specification of the problems).
                        if isinstance(problem, dict):
                            # Get the extension (match extensions with more
                            # than one dot too).
                            DOUBLE_EXTENSIONS = ['.tar.gz', '.tar.bz2', '.tar.bz', '.tar.xz']
                            ext = ""
                            if not problem['filename'].endswith(tuple(DOUBLE_EXTENSIONS)):
                                _, ext = os.path.splitext(problem['filename'])
                            else:
                                for t_ext in DOUBLE_EXTENSIONS:
                                    if problem['filename'].endswith(t_ext):
                                        ext = t_ext

                            subfile = io.BytesIO(problem['value'])
                            taskfname = base_path + str(submission["_id"]) + '/uploaded_files/' + pid + ext

                            # Generate file info
                            info = tarfile.TarInfo(name=taskfname)
                            info.size = subfile.getbuffer().nbytes
                            info.mtime = time.mktime(submission["submitted_on"].timetuple())

                            # Add file in tar archive
                            tar.addfile(info, fileobj=subfile)

    # Close tarfile and put tempfile cursor at 0 so callers can read it.
    tar.close()
    tmpfile.seek(0)
    return tmpfile
def modify(self, **params):
    """Update this account on the server and mirror the result locally.

    https://developers.coinbase.com/api#modify-an-account
    """
    payload = self.api_client.update_account(self.id, **params)
    # Keep the local object in sync with what the server returned.
    self.update(payload)
    return payload
def write_file(self, path, contents):
    """Write a file of any type to the destination path. Useful for files
    like robots.txt, manifest.json, and so on.

    Args:
        path (str): The name of the file to write to.
        contents (str or bytes): The contents to write.
    """
    target = self._get_dist_path(path)
    parent = os.path.dirname(target)
    # Create intermediate directories on first use.
    if not os.path.isdir(parent):
        os.makedirs(parent)
    # Pick text vs binary mode from the payload type.
    open_mode = 'wb+' if isinstance(contents, bytes) else 'w'
    with open(target, open_mode) as handle:
        handle.write(contents)
def write ( self , string ) :
"""Erase newline from a string and write to the logger .""" | string = string . rstrip ( )
if string : # Don ' t log empty lines
self . logger . critical ( string ) |
def symmetric_decrypt_HMAC(cyphertext, key, hmac_secret):
    """Decrypt ``cyphertext`` and verify its embedded truncated MAC.

    :raises: :class:`RuntimeError` when HMAC verification fails
    """
    iv = symmetric_decrypt_iv(cyphertext, key)
    message = symmetric_decrypt_with_iv(cyphertext, key, iv)
    # The IV doubles as a truncated MAC: its first 13 bytes must equal the
    # HMAC of (last 3 IV bytes + plaintext).
    expected = hmac_sha1(hmac_secret, iv[-3:] + message)
    if expected[:13] != iv[:13]:
        raise RuntimeError("Unable to decrypt message. HMAC does not match.")
    return message
def build_bond(iface, **settings):
    '''Create a bond script in /etc/modprobe.d with the passed settings
    and load the bonding kernel module.

    CLI Example:

    .. code-block:: bash

        salt '*' ip.build_bond bond0 mode=balance-alb
    '''
    deb_major = __grains__['osrelease'][:1]
    opts = _parse_settings_bond(settings, iface)
    try:
        template = JINJA.get_template('conf.jinja')
    except jinja2.exceptions.TemplateNotFound:
        log.error('Could not load template conf.jinja')
        return ''
    data = template.render({'name': iface, 'bonding': opts})

    # Test mode: return the rendered config without touching the system.
    if settings.get('test'):
        return _read_temp(data)

    conf_name = '{0}.conf'.format(iface)
    _write_file(iface, data, _DEB_NETWORK_CONF_FILES, conf_name)
    path = os.path.join(_DEB_NETWORK_CONF_FILES, conf_name)

    if deb_major == '5':
        # Debian 5 keeps module options in /etc/modprobe.conf: remove any
        # stale alias/options lines for this interface, then append ours.
        for line_type in ('alias', 'options'):
            cmd = ['sed', '-i', '-e', r'/^{0}\s{1}.*/d'.format(line_type, iface), '/etc/modprobe.conf']
            __salt__['cmd.run'](cmd, python_shell=False)
        __salt__['file.append']('/etc/modprobe.conf', path)

    # Load kernel module
    __salt__['kmod.load']('bonding')

    # install ifenslave-2.6
    __salt__['pkg.install']('ifenslave-2.6')

    return _read_file(path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.