signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_wish_list_by_id(cls, wish_list_id, **kwargs):
    """Find a single WishList by its ID.

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass ``async=True``::

        >>> thread = api.get_wish_list_by_id(wish_list_id, async=True)
        >>> result = thread.get()

    :param bool async: when True, return the request thread instead
    :param str wish_list_id: ID of the wishList to return (required)
    :return: WishList, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._get_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
    data = cls._get_wish_list_by_id_with_http_info(wish_list_id, **kwargs)
    return data
|
def getAsTuple(self, section):
    """Return the given section's values as a namedtuple.

    :param section: section name
    :return: namedtuple built from the section's keys and values
    """
    fields = self.getKeys(section)
    values = self.getValues(section)
    record_type = namedtuple(section, fields)
    return record_type(**values)
|
def visit_IfStatement(self, node):
    """Visitor for `IfStatement` AST node.

    Visits the `if` condition and body, then every `else if` pair, and
    finally the `else` body (its condition slot is unused).
    """
    condition, body = node.if_compound
    self.visit(condition)
    self.visit(body)
    for elif_condition, elif_body in node.else_if_compounds:
        self.visit(elif_condition)
        self.visit(elif_body)
    if node.else_compound is not None:
        _unused, else_body = node.else_compound
        self.visit(else_body)
|
def _maybe_get_default_name ( self , name ) :
"""Checks a name and determines whether to use the default name .
: param name : The current name to check .
: return : Either None or a : class : ` str ` representing the name ."""
|
if name is None and self . uri . fragment :
name = self . uri . fragment
return name
|
def apply_string_substitutions(inputs, substitutions, inverse=False, case_insensitive=False, unused_substitutions="ignore",):
    """Apply a number of substitutions to a string (or list of strings).

    The substitutions are applied effectively all at once, so they do not
    cascade into one another. Where substitutions conflict, the longer key
    takes precedence.

    Parameters
    ----------
    inputs : str, list of str
        The string(s) to which we want to apply the substitutions.
    substitutions : dict
        The substitutions we wish to make. The keys are the strings we wish
        to substitute, the values are the strings which we want to appear in
        the output strings.
    inverse : bool
        If True, do the substitutions the other way around, i.e. use the keys
        as the strings we want to appear in the output strings and the values
        as the strings we wish to substitute.
    case_insensitive : bool
        If True, the substitutions will be made in a case insensitive way.
    unused_substitutions : {"ignore", "warn", "raise"}, default "ignore"
        Behaviour when one or more of the inputs does not have a
        corresponding substitution: "ignore" does nothing, "warn" issues a
        warning, "raise" raises an error.

    Returns
    -------
    ``type(inputs)``
        The input with substitutions performed.

    Examples
    --------
    >>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"})
    'Hello BobBob'
    >>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"}, inverse=True)
    'Hello JimJim'
    >>> apply_string_substitutions("Muttons Butter", {"Mutton": "Gutter", "tt": "zz"})
    'Gutters Buzzer'
    >>> apply_string_substitutions("Butter", {"buTTer": "Gutter"}, case_insensitive=True)
    'Gutter'
    """
    if inverse:
        substitutions = {v: k for k, v in substitutions.items()}
    # only possible to have conflicting substitutions when case insensitive
    if case_insensitive:
        _check_duplicate_substitutions(substitutions)
    if unused_substitutions != "ignore":
        _check_unused_substitutions(substitutions, inputs, unused_substitutions, case_insensitive)
    # One combined regexp implements the "all at once, longest wins" rule.
    compiled_regexp = _compile_replacement_regexp(substitutions, case_insensitive=case_insensitive)
    inputs_return = deepcopy(inputs)
    if isinstance(inputs_return, str):
        inputs_return = _multiple_replace(inputs_return, substitutions, compiled_regexp)
    else:
        inputs_return = [_multiple_replace(v, substitutions, compiled_regexp) for v in inputs_return]
    return inputs_return
|
def read_file(filename: PathLike = "experiment.yml") -> Dict[str, Any]:
    """Read and parse a YAML file.

    :param filename: path to the YAML file to load
    :return: the parsed document
    """
    logger.debug("Input file: %s", filename)
    # Explicit encoding so parsing does not depend on the platform default.
    with open(filename, "r", encoding="utf-8") as stream:
        structure = yaml.safe_load(stream)
    return structure
|
def _get_signed_predecessors(im, node, polarity):
    """Get upstream nodes in the influence map.

    Yields each immediate predecessor of `node` together with the overall
    polarity of the path to that predecessor, i.e. the polarity of the path
    to `node` multiplied by the sign of the connecting edge.

    Parameters
    ----------
    im : networkx.MultiDiGraph
        Graph containing the influence map.
    node : str
        The node (rule name) in the influence map to get predecessors
        (upstream nodes) for.
    polarity : int
        Polarity of the overall path to the given node.

    Returns
    -------
    generator of tuples, (node, polarity)
        Each tuple returned contains two elements, a node (string) and the
        polarity of the overall path (int) to that node.
    """
    # Fix: removed an unused `signed_pred_list` accumulator; this function
    # is a generator and never read or returned that list.
    for pred in im.predecessors(node):
        pred_edge = (pred, node)
        yield (pred, _get_edge_sign(im, pred_edge) * polarity)
|
def _view_interval(self, queue_type, queue_id):
    """Update the queue interval in SharQ.

    Parses ``interval`` from the request body, forwards it to
    ``self.sq.interval`` and returns a JSON response (400 on bad input
    or internal failure, 404 when SharQ reports failure).
    """
    response = {'status': 'failure'}
    try:
        request_data = json.loads(request.data)
        interval = request_data['interval']
    except Exception as e:
        # Fix: `except Exception, e` and `e.message` are Python 2 only;
        # use `as e` / str(e) for Python 3 compatibility.
        response['message'] = str(e)
        return jsonify(**response), 400
    request_data = {'queue_type': queue_type, 'queue_id': queue_id, 'interval': interval}
    try:
        response = self.sq.interval(**request_data)
        if response['status'] == 'failure':
            return jsonify(**response), 404
    except Exception as e:
        response['message'] = str(e)
        return jsonify(**response), 400
    return jsonify(**response)
|
def all(self):
    """Primary method to fetch data based on filters.

    Also triggered when the QuerySet is evaluated by calling one of the
    following methods:
    * len()
    * bool()
    * list()
    * Iteration
    * Slicing
    """
    logger.debug(f'Query `{self.__class__.__name__}` objects with filters {self}')
    # Destroy any cached results
    self._result_cache = None
    # Fetch Model class and connected repository from Repository Factory
    model_cls = repo_factory.get_model(self._entity_cls)
    repository = repo_factory.get_repository(self._entity_cls)
    # order_by clause must be a list of keys; fall back to the entity default
    order_by = self._entity_cls.meta_.order_by if not self._order_by else self._order_by
    # Call the read method of the repository
    results = repository.filter(self._criteria, self._offset, self._limit, order_by)
    # Convert the returned results to entities, marking each as retrieved
    entity_items = []
    for item in results.items:
        entity = model_cls.to_entity(item)
        entity.state_.mark_retrieved()
        entity_items.append(entity)
    results.items = entity_items
    # Cache results
    self._result_cache = results
    return results
|
def _set_on_startup_overloadtime(self, v, load=False):
    """Setter method for on_startup_overloadtime, mapped from YANG variable
    /routing_system/router/isis/router_isis_cmds_holder/router_isis_attributes/set_overload_bit/on_startup/on_startup_overloadtime (uint32).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_on_startup_overloadtime is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_on_startup_overloadtime() directly.
    """
    # Unwrap a previously wrapped YANG value before re-validating it.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Auto-generated: validate/wrap the value with the YANG uint32
        # restriction (effective range 5..86400).
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'5..86400']}), is_leaf=True, yang_name="on-startup-overloadtime", rest_name="on-startup-overloadtime", parent=self, choice=(u'ch-on-startup', u'ca-on-startup'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """on_startup_overloadtime must be of a type compatible with uint32""", 'defined-type': "uint32", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'5..86400']}), is_leaf=True, yang_name="on-startup-overloadtime", rest_name="on-startup-overloadtime", parent=self, choice=(u'ch-on-startup', u'ca-on-startup'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='uint32', is_config=True)""", })
    self.__on_startup_overloadtime = t
    if hasattr(self, '_set'):
        self._set()
|
def start(self, max_val):
    """Start the progress bar.

    :arg max_val: Maximum value; a non-positive value switches the bar
        into infinite mode.
    :type max_val: int
    """
    self._timer.init_timer(max_value=max_val)
    # Infinite mode is used when no meaningful maximum is known.
    self._infinite_mode = max_val <= 0
    self._infinite_position = 0
    self._max = max_val
    self._fill_empty()
    self._value = 0
    # Render the initial (zero-progress) state before marking as started.
    self.progress(0)
    self._status = ProgressBarStatus.started
|
def wav_length(fn: str) -> float:
    """Return the length of the WAV file *fn* in seconds.

    Shells out to ``sox <fn> -n stat`` and parses the
    "Length (seconds): <value>" line from its stderr output.

    :raises AssertionError: if the expected "Length" line is not found
    """
    args = [config.SOX_PATH, fn, "-n", "stat"]
    p = subprocess.Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    # Fix: decode the stderr bytes instead of parsing str(bytes) repr,
    # and drop the leftover debug print. sox writes its statistics on
    # stderr; the second line is "Length (seconds):   <value>".
    stderr_text = p.communicate()[1].decode("utf-8", errors="replace")
    length_line = stderr_text.split("\n")[1].split()
    assert length_line[0] == "Length"
    return float(length_line[-1])
|
def build_feature_collection(node, name=None):
    """Build and return a (decoded) GeoJSON FeatureCollection corresponding
    to this KML DOM node (typically a KML Folder).

    If a name is given, store it in the FeatureCollection's ``'name'``
    attribute.
    """
    # Collect one feature per Placemark, skipping any that fail to build.
    features = []
    for placemark in get(node, 'Placemark'):
        feature = build_feature(placemark)
        if feature is not None:
            features.append(feature)
    collection = {'type': 'FeatureCollection', 'features': features, }
    if name is not None:
        collection['name'] = name
    return collection
|
def _delete_images(self, instance):
    """Deletes all user media images of the given instance."""
    content_type = ContentType.objects.get_for_model(instance)
    queryset = UserMediaImage.objects.filter(
        content_type=content_type,
        object_id=instance.pk,
        user=instance.user,
    )
    queryset.delete()
|
def ajax_only(view_func):
    """Require that the view is only accessed via AJAX."""
    @wraps(view_func, assigned=available_attrs(view_func))
    def _wrapped_view(request, *args, **kwargs):
        # Reject non-AJAX requests outright with a 400.
        if not request.is_ajax():
            return http.HttpResponseBadRequest()
        return view_func(request, *args, **kwargs)
    return _wrapped_view
|
def compute_strategy(self, grads):
    """Decide whether the given gradients can be packed into splits.

    Returns:
        bool - False if grads cannot be packed due to various reasons
        (too few gradient elements, or inconsistent dtypes).
    """
    for g in grads:
        assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape)
    self._shapes = [g.shape for g in grads]
    self._sizes = [shape.num_elements() for shape in self._shapes]
    self._total_size = sum(self._sizes)
    if self._total_size / self._num_split < 1024:
        logger.info("Skip GradientPacker due to too few gradients.")
        return False
    # All gradients must share a single dtype to be packable.
    if len({g.dtype for g in grads}) != 1:
        logger.info("Skip GradientPacker due to inconsistent gradient types.")
        return False
    self._grad_dtype = grads[0].dtype
    # The last split absorbs the remainder of the integer division.
    base_split = self._total_size // self._num_split
    last_split = self._total_size - base_split * (self._num_split - 1)
    self._split_sizes = [base_split] * (self._num_split - 1) + [last_split]
    logger.info("Will pack {} gradients of total dimension={} into {} splits.".format(len(self._sizes), self._total_size, self._num_split))
    return True
|
def stmt2dzn(name, val, declare=True, assign=True, wrap=True):
    """Return a dzn statement declaring and/or assigning the given value.

    Parameters
    ----------
    name
        The variable name.
    val
        The value to serialize.
    declare : bool
        Whether to include the declaration of the variable in the statement
        or just the assignment.
    assign : bool
        Whether to include the assignment of the value in the statement or
        just the declaration.
    wrap : bool
        Whether to wrap the serialized value.

    Returns
    -------
    str
        The serialized dzn representation of the value.
    """
    if not declare and not assign:
        raise ValueError('The statement must be a declaration or an assignment.')
    parts = []
    if declare:
        parts.append('{}: '.format(_dzn_type(val)))
    parts.append(name)
    if assign:
        parts.append(' = {}'.format(val2dzn(val, wrap=wrap)))
    parts.append(';')
    return ''.join(parts)
|
def get_interface_detail_output_interface_port_role(self, **kwargs):
    """Auto Generated Code.

    Builds the XML tree get_interface_detail/output/interface with the
    interface-type, interface-name and port-role leaves filled from kwargs,
    then passes it to the callback (``callback`` kwarg, defaulting to
    ``self._callback``).

    Required kwargs: interface_type, interface_name, port_role.
    """
    # Fix: the generated original created ET.Element("config") and
    # immediately rebound the name, discarding it; the dead element is
    # dropped here and the tree is rooted directly at get_interface_detail.
    config = ET.Element("get_interface_detail")
    output = ET.SubElement(config, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    port_role = ET.SubElement(interface, "port-role")
    port_role.text = kwargs.pop('port_role')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _get_dict_default ( obj , key ) :
"""obj MUST BE A DICT
key IS EXPECTED TO BE LITERAL ( NO ESCAPING )
TRY BOTH ATTRIBUTE AND ITEM ACCESS , OR RETURN Null"""
|
try :
return obj [ key ]
except Exception as f :
pass
try :
if float ( key ) == round ( float ( key ) , 0 ) :
return obj [ int ( key ) ]
except Exception as f :
pass
return NullType ( obj , key )
|
def n_to_one(num_inputs, num_streams, bits_per_axis, weight_per_axis):
    """Create inputs of the form ``(x, y, ..., y)``.

    Here n = num_streams - 1. For each component we allocate a fixed number
    of units `bits_per_axis` and encode the component with a scalar encoder.
    This is a toy example for the scenario where, of several input streams,
    all but the first mutually determine each other while the first is
    independent of the rest.
    """
    total_bits = num_streams * bits_per_axis
    # Setting up the scalar encoder
    scalar_encode = ScalarEncoder(
        dimensions=num_streams,
        max_values=[[0., 1.]] * num_streams,
        bits_per_axis=[bits_per_axis] * num_streams,
        weight_per_axis=[weight_per_axis] * num_streams,
        wrap_around=False,
    )
    xs = np.random.sample(num_inputs)
    ys = np.random.sample(num_inputs)
    input_vectors = np.zeros((num_inputs, total_bits))
    for i in range(num_inputs):
        # First component is independent; the rest repeat the same value.
        input_vectors[i] = scalar_encode([xs[i]] + [ys[i]] * (num_streams - 1))
    return input_vectors
|
def get_email_context(self, **kwargs):
    '''Overrides EmailRecipientMixin'''
    include_name = kwargs.pop('includeName', True)
    context = super(EventRegistration, self).get_email_context(**kwargs)
    context.update({
        'title': self.event.name,
        'start': self.event.firstOccurrenceTime,
        'end': self.event.lastOccurrenceTime,
    })
    if include_name:
        context.update({
            'first_name': self.registration.customer.first_name,
            'last_name': self.registration.customer.last_name,
        })
    return context
|
def check_password(self, hashed_password, plain_password):
    """Encode the plain_password with the salt of the hashed_password.

    Return the comparison with the encrypted_password.
    """
    salt, encrypted_password = hashed_password.split('$')
    return self.get_hash(salt, plain_password) == encrypted_password
|
def _pypi_head_package(dependency):
    """Hit PyPI with an HTTP HEAD to check if pkg_name exists."""
    # Pin the URL to the requested version when one was specified.
    if dependency.specs:
        _, version = dependency.specs[0]
        url = BASE_PYPI_URL_WITH_VERSION.format(name=dependency.project_name, version=version)
    else:
        url = BASE_PYPI_URL.format(name=dependency.project_name)
    logger.debug("Doing HEAD requests against %s", url)
    try:
        response = request.urlopen(request.Request(url, method='HEAD'))
    except HTTPError as http_error:
        if http_error.code != HTTP_STATUS_NOT_FOUND:
            raise
        return False
    if response.status == HTTP_STATUS_OK:
        logger.debug("%r exists in PyPI.", dependency)
    else:
        # Maybe we are getting something like a redirect. In this case we are
        # only warning the user and trying to install the dependency.
        # In the worst scenery fades will fail to install it.
        logger.warning("Got a (unexpected) HTTP_STATUS=%r and reason=%r checking if %r exists", response.status, response.reason, dependency)
    return True
|
def split_input(cls, job_config):
    """Inherit docs."""
    params = job_config.input_reader_params
    count = params[cls.COUNT]
    string_length = params.get(cls.STRING_LENGTH, cls._DEFAULT_STRING_LENGTH)
    shard_count = job_config.shard_count
    per_shard = count // shard_count
    readers = [cls(per_shard, string_length) for _ in range(shard_count)]
    # An extra reader handles the remainder of the integer division.
    remainder = count - per_shard * shard_count
    if remainder > 0:
        readers.append(cls(remainder, string_length))
    return readers
|
def username_validator(self, form, field):
    """Ensure that Usernames contain at least 3 alphanumeric characters.

    Override this method to customize the username validator.
    """
    username = field.data
    if len(username) < 3:
        raise ValidationError(_('Username must be at least 3 characters long'))
    valid_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-._'
    for char in username:
        if char not in valid_chars:
            raise ValidationError(_("Username may only contain letters, numbers, '-', '.' and '_'"))
|
def update_letter(self, letter_id, letter_dict):
    """Updates a letter.

    :param letter_id: the letter id
    :param letter_dict: dict
    :return: dict
    """
    return self._create_put_request(
        resource=LETTERS,
        billomat_id=letter_id,
        send_data=letter_dict,
    )
|
def snmp_server_group_group_name(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF config XML for snmp-server/group/group-name and
    passes it to the callback (``callback`` kwarg, defaulting to
    ``self._callback``). Required kwargs: group_version, group_name.
    """
    config = ET.Element("config")
    snmp_server = ET.SubElement(config, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    group = ET.SubElement(snmp_server, "group")
    version_key = ET.SubElement(group, "group-version")
    version_key.text = kwargs.pop('group_version')
    name_leaf = ET.SubElement(group, "group-name")
    name_leaf.text = kwargs.pop('group_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def get_loggable_url(url):
    """Strip out secrets from taskcluster urls.

    Args:
        url (str): the url to strip

    Returns:
        str: the loggable url
    """
    loggable = url or ""
    # Cut the url at the first secret-bearing query parameter found.
    for marker in ("bewit=", "AWSAccessKeyId=", "access_token="):
        loggable = loggable.split(marker)[0]
    if loggable == url:
        return loggable
    return "{}<snip>".format(loggable)
|
def check_odd(num):
    """Verify whether an integer is odd using a bitwise operator.

    >>> check_odd(5)
    True
    >>> check_odd(6)
    False
    >>> check_odd(7)
    True
    """
    # `num & 1` isolates the lowest bit, which is set exactly for odd
    # integers (including negatives in two's complement). This replaces
    # the redundant `True if (num ^ 1) == (num - 1) else False` form.
    return (num & 1) == 1
|
def gen_scripts(file_name, file_name_ext, obj_name, obj_ext_name, output, output_ext, field=1, notexplicit=None, ascii_props=False, append=False, prefix=""):
    """Generate `script` property tables.

    Reads the Unicode script data file (`file_name`) and the script
    extensions data file (`file_name_ext`) for UNIVERSION, and writes two
    Python source files (`output` / `output_ext`) containing dicts mapping
    script names to character-range strings.
    """
    obj = {}
    obj2 = {}
    aliases = {}
    # Map short script alias names to long names ("sc ;" lines).
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, 'PropertyValueAliases.txt'), 'r', 'utf-8') as uf:
        for line in uf:
            if line.startswith('sc ;'):
                values = line.split(';')
                aliases[format_name(values[1].strip())] = format_name(values[2].strip())
    # Script extensions: each code-point span may list several scripts.
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, file_name_ext), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split('#')[0].split(';')
                if len(data) < 2:
                    continue
                exts = [aliases[format_name(n)] for n in data[1].strip().split(' ')]
                span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
                for ext in exts:
                    if ext not in obj2:
                        obj2[ext] = []
                    if span is None:
                        continue
                    obj2[ext].extend(span)
    # Primary scripts: spans contribute to both tables.
    with codecs.open(os.path.join(HOME, 'unicodedata', UNIVERSION, file_name), 'r', 'utf-8') as uf:
        for line in uf:
            if not line.startswith('#'):
                data = line.split('#')[0].split(';')
                if len(data) < 2:
                    continue
                span = create_span([int(i, 16) for i in data[0].strip().split('..')], is_bytes=ascii_props)
                name = format_name(data[1])
                if name not in obj:
                    obj[name] = []
                if name not in obj2:
                    obj2[name] = []
                if span is None:
                    continue
                obj[name].extend(span)
                obj2[name].extend(span)
    # De-duplicate and sort the collected code points.
    for name in list(obj.keys()):
        s = set(obj[name])
        obj[name] = sorted(s)
    for name in list(obj2.keys()):
        s = set(obj2[name])
        obj2[name] = sorted(s)
    if notexplicit:
        not_explicitly_defined(obj, notexplicit, is_bytes=ascii_props)
        not_explicitly_defined(obj2, notexplicit, is_bytes=ascii_props)
    # Convert characters values to ranges
    char2range(obj, is_bytes=ascii_props)
    char2range(obj2, is_bytes=ascii_props)
    with codecs.open(output, 'a' if append else 'w', 'utf-8') as f:
        if not append:
            f.write(HEADER)
        # Write out the Unicode properties
        f.write('%s_%s = {\n' % (prefix, obj_name))
        count = len(obj) - 1
        i = 0
        for k1, v1 in sorted(obj.items()):
            f.write(' "%s": "%s"' % (k1, v1))
            if i == count:
                f.write('\n}\n')
            else:
                f.write(',\n')
            i += 1
    with codecs.open(output_ext, 'a' if append else 'w', 'utf-8') as f:
        if not append:
            f.write(HEADER)
        # Write out the Unicode properties
        f.write('%s_%s = {\n' % (prefix, obj_ext_name))
        count = len(obj2) - 1
        i = 0
        for k1, v1 in sorted(obj2.items()):
            f.write(' "%s": "%s"' % (k1, v1))
            if i == count:
                f.write('\n}\n')
            else:
                f.write(',\n')
            i += 1
|
def setData(self, index, value, role=Qt.EditRole):
    """Handle a cell content change from the view.

    Converts the edited text to the array's dtype, validates it against
    ``self.test_array``, records the change in ``self.changes`` and keeps
    the running min/max up to date.

    Returns True when the change was accepted, False otherwise.
    """
    if not index.isValid() or self.readonly:
        return False
    i = index.row()
    j = index.column()
    value = from_qvariant(value, str)
    dtype = self._data.dtype.name
    if dtype == "bool":
        # Numeric text maps through float -> bool; otherwise accept "true".
        try:
            val = bool(float(value))
        except ValueError:
            val = value.lower() == "true"
    elif dtype.startswith("string") or dtype.startswith("bytes"):
        val = to_binary_string(value, 'utf8')
    elif dtype.startswith("unicode") or dtype.startswith("str"):
        val = to_text_string(value)
    else:
        # Reject text starting/ending with a bare exponent marker before
        # handing it to complex().
        if value.lower().startswith('e') or value.lower().endswith('e'):
            return False
        try:
            val = complex(value)
            if not val.imag:
                val = val.real
        except ValueError as e:
            QMessageBox.critical(self.dialog, "Error", "Value error: %s" % str(e))
            return False
    try:
        # Probe assignment into the dtype-typed scratch array to surface
        # conversion problems before committing the change.
        self.test_array[0] = val
        # will raise an Exception eventually
    except OverflowError as e:
        print("OverflowError: " + str(e))  # spyder: test-skip
        QMessageBox.critical(self.dialog, "Error", "Overflow error: %s" % str(e))
        return False
    # Add change to self.changes
    self.changes[(i, j)] = val
    self.dataChanged.emit(index, index)
    if not is_string(val):
        if val > self.vmax:
            self.vmax = val
        if val < self.vmin:
            self.vmin = val
    return True
|
def wrap(s, prefix=r'\b', suffix=r'\b', grouper='()'):
    r"""Wrap a string (typically a regex) with a prefix and suffix (usually a nonconsuming word break).

    Arguments:
        prefix, suffix (str): strings to append to the front and back of the provided string
        grouper (2-len str or 2-tuple): characters or strings to separate prefix and suffix from the middle

    >>> wrap(r'\w*')
    '\\b(\\w*)\\b'
    >>> wrap(r'middle', prefix=None)
    '(middle)\\b'
    """
    open_mark = try_get(grouper, 0, '')
    close_mark = try_get(grouper, 1, open_mark)
    pieces = [prefix or '', open_mark, s or '', close_mark, suffix or '']
    return ''.join(pieces)
|
def prices(self):
    """TimeSeries of prices up to the current time.

    Refreshes the root first when its cached state is stale.
    """
    root = self.root
    if root.stale:
        root.update(self.now, None)
    return self._prices.loc[:self.now]
|
def _find_next_widget(self, direction, stay_in_col=False, start_at=None, wrap=False):
    """Find the next widget to get the focus, stopping at the start/end of the list if hit.

    Mutates ``self._live_col`` / ``self._live_widget`` to point at the next
    enabled tab-stop widget, restoring them when none is found.

    :param direction: The direction to move through the widgets (a +/- step).
    :param stay_in_col: Whether to limit search to current column.
    :param start_at: Optional starting point in current column.
    :param wrap: Whether to wrap around columns when at the end.
    """
    current_widget = self._live_widget
    current_col = self._live_col
    if start_at is not None:
        self._live_widget = start_at
    still_looking = True
    while still_looking:
        while 0 <= self._live_col < len(self._columns):
            self._live_widget += direction
            # Scan the current column for a focusable widget.
            while 0 <= self._live_widget < len(self._columns[self._live_col]):
                widget = self._columns[self._live_col][self._live_widget]
                if widget.is_tab_stop and not widget.disabled:
                    return
                self._live_widget += direction
            if stay_in_col:
                # Don't move to another column - just stay where we are.
                self._live_widget = current_widget
                break
            else:
                self._live_col += direction
                # Position just before the first (or after the last) widget
                # so the next increment lands inside the new column.
                self._live_widget = -1 if direction > 0 else len(self._columns[self._live_col])
                if self._live_col == current_col:
                    # We've wrapped all the way back to the same column -
                    # give up now and stay where we were.
                    self._live_widget = current_widget
                    return
        # If we got here we hit the end of the columns - only keep on
        # looking if we're allowed to wrap.
        still_looking = wrap
        if still_looking:
            if self._live_col < 0:
                self._live_col = len(self._columns) - 1
            else:
                self._live_col = 0
|
def is_complex(arg):
    '''is_complex(x) yields True if x is a complex numeric object and False otherwise. Note that
    this includes anything representable as a complex number such as an integer or a boolean
    value. In effect, this makes this function an alias for is_number(arg).'''
    if is_quantity(arg):
        # Unwrap quantities and test their magnitude instead.
        return is_complex(mag(arg))
    if isinstance(arg, numbers.Complex):
        return True
    return is_npscalar(arg, 'complex') or is_npvalue(arg, 'complex')
|
def IOW(type, nr, size):
    """An ioctl with write parameters.

    size (ctype type or instance)
        Type/structure of the argument passed to ioctl's "arg" argument.
    """
    # Encode direction=IOC_WRITE together with the size of the argument
    # structure into the ioctl request number.
    return IOC(IOC_WRITE, type, nr, IOC_TYPECHECK(size))
|
def MaxPool3D(a, k, strides, padding):
    """Maximum 3D pooling op."""
    patches = _pool_patches(a, k, strides, padding.decode("ascii"))
    pool_axes = tuple(range(-len(k), 0))
    # Single-element tuple: the op returns a tuple of outputs.
    return (np.amax(patches, axis=pool_axes),)
|
def join_struct_arrays(arrays):
    """Take a list of possibly structured arrays, concatenate their dtypes,
    and return one big array with that dtype. Does the inverse of
    ``separate_struct_array``.

    :param list arrays: List of ``np.ndarray``s
    """
    # taken from http://stackoverflow.com/questions/5355744/numpy-joining-structured-arrays
    sizes = np.array([a.itemsize for a in arrays])
    offsets = np.r_[0, sizes.cumsum()]
    shape = arrays[0].shape
    # Assemble the raw bytes of every input side by side, then reinterpret
    # the byte buffer with the concatenated dtype.
    joint = np.empty(shape + (offsets[-1],), dtype=np.uint8)
    for a, size, offset in zip(arrays, sizes, offsets):
        raw = np.atleast_1d(a).view(np.uint8).reshape(shape + (size,))
        joint[..., offset:offset + size] = raw
    combined_dtype = sum((a.dtype.descr for a in arrays), [])
    return joint.ravel().view(combined_dtype)
|
def _interval_string_to_seconds ( interval_string ) :
"""Convert internal string like 1M , 1Y3M , 3W to seconds .
: type interval _ string : str
: param interval _ string : Interval string like 1M , 1W , 1M3W4h2s . . .
( s = > seconds , m = > minutes , h = > hours , D = > days ,
W = > weeks , M = > months , Y = > Years ) .
: rtype : int
: return : The conversion in seconds of interval _ string ."""
|
interval_exc = "Bad interval format for {0}" . format ( interval_string )
interval_dict = { "s" : 1 , "m" : 60 , "h" : 3600 , "D" : 86400 , "W" : 7 * 86400 , "M" : 30 * 86400 , "Y" : 365 * 86400 }
interval_regex = re . compile ( "^(?P<num>[0-9]+)(?P<ext>[smhDWMY])" )
seconds = 0
while interval_string :
match = interval_regex . match ( interval_string )
if match :
num , ext = int ( match . group ( "num" ) ) , match . group ( "ext" )
if num > 0 and ext in interval_dict :
seconds += num * interval_dict [ ext ]
interval_string = interval_string [ match . end ( ) : ]
else :
raise Exception ( interval_exc )
else :
raise Exception ( interval_exc )
return seconds
|
def densenet121(num_classes=1000, pretrained='imagenet'):
    r"""Densenet-121 model from
    `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`
    """
    # Always build the raw torchvision model, then load weights separately so
    # the pretrained settings (input size, normalization, url) are applied.
    model = models.densenet121(pretrained=False)
    if pretrained is not None:
        weights = pretrained_settings['densenet121'][pretrained]
        model = load_pretrained(model, num_classes, weights)
    return modify_densenets(model)
|
def mount(self, fstype=None):
    """Mount this volume under the configured mountpoint.

    Based on the file system type as determined by :func:`determine_fs_type`, the proper mount command is executed
    for this volume. The volume is mounted in a temporary path (or a pretty path if :attr:`pretty` is enabled) in
    the mountpoint as specified by :attr:`mountpoint`.

    If the file system type is a LUKS container or LVM, additional methods may be called, adding subvolumes to
    :attr:`volumes`.

    :param fstype: file system type object to mount with; autodetected via
        :func:`determine_fs_type` when None.
    :raises NotMountedError: if the parent volume/disk is not mounted
    :raises NoMountpointAvailableError: if no mountpoint was found
    :raises NoLoopbackAvailableError: if no loopback device was found
    :raises UnsupportedFilesystemError: if the fstype is not supported for mounting
    :raises SubsystemError: if one of the underlying commands failed
    """
    # Mounting requires the parent (disk or containing volume) to be mounted first.
    if not self.parent.is_mounted:
        raise NotMountedError(self.parent)
    if fstype is None:
        fstype = self.determine_fs_type()
    self._load_fsstat_data()
    # Prepare mount command
    try:
        # The fstype object knows how to mount this particular file system.
        fstype.mount(self)
        self.was_mounted = True
        self.is_mounted = True
        self.fstype = fstype
    except Exception as e:
        logger.exception("Execution failed due to {} {}".format(type(e), e), exc_info=True)
        # Wrap unexpected errors so callers only need to handle
        # ImageMounterError subclasses.
        if not isinstance(e, ImageMounterError):
            raise SubsystemError(e)
        else:
            raise
|
def get_infix_items(tokens, callback=infix_error):
    """Perform infix token processing.

    Takes a callback that (takes infix tokens and returns a string) to
    handle inner infix calls.
    """
    internal_assert(len(tokens) >= 3, "invalid infix tokens", tokens)
    first_arg, op, second_arg = tokens[0], tokens[1], tokens[2]
    remaining = tokens[3:]
    operands = list(first_arg) + list(second_arg)
    while remaining:
        # Fold everything accumulated so far into a single inner infix call,
        # then pick up the next operator and its operand.
        operands = [callback([operands, op, []])]
        op, next_arg = remaining[0], remaining[1]
        remaining = remaining[2:]
        operands += list(next_arg)
    return op, operands
|
def import_xml(self, xml_gzipped_file_path, taxids=None, silent=False):
    """Import entries from a gzipped UniProt XML dump into the database.

    Entries are read line-wise and collected into synthetic ``<entries>``
    documents of up to 1000 entries each, which are handed to
    :meth:`insert_entries`. Start/completion timestamps are recorded on the
    Swiss-Prot ``Version`` row.

    :param str xml_gzipped_file_path: path to the gzipped XML file
    :param Optional[list[int]] taxids: NCBI taxonomy identifiers
    :param bool silent: no progress output if True
    """
    # Record the import start time on the Swiss-Prot version row.
    version = self.session.query(models.Version).filter(models.Version.knowledgebase == 'Swiss-Prot').first()
    version.import_start_date = datetime.now()
    entry_xml = '<entries>'
    number_of_entries = 0
    interval = 1000  # entries per batch insert
    start = False  # True while the cursor is inside an <entry> element
    if sys.platform in ('linux', 'linux2', 'darwin'):
        log.info('Load gzipped XML from {}'.format(xml_gzipped_file_path))
        # Count lines up front so tqdm can show a real progress bar.
        zcat_command = 'gzcat' if sys.platform == 'darwin' else 'zcat'
        number_of_lines = int(getoutput("{} {} | wc -l".format(zcat_command, xml_gzipped_file_path)))
        tqdm_desc = 'Import {} lines'.format(number_of_lines)
    else:
        # NOTE(review): leftover debug output ("something else" in German);
        # non-POSIX platforms simply get no progress total.
        print('bin was anderes')
        number_of_lines = None
        tqdm_desc = None
    with gzip.open(xml_gzipped_file_path) as fd:
        for line in tqdm(fd, desc=tqdm_desc, total=number_of_lines, mininterval=1, disable=silent):
            end_of_file = line.startswith(b"</uniprot>")
            if line.startswith(b"<entry "):
                start = True
            elif end_of_file:
                start = False
            if start:
                entry_xml += line.decode("utf-8")
            if line.startswith(b"</entry>") or end_of_file:
                number_of_entries += 1
                start = False
                # Flush a full batch (or the final partial batch) to the DB.
                if number_of_entries == interval or end_of_file:
                    entry_xml += "</entries>"
                    self.insert_entries(entry_xml, taxids)
                    if end_of_file:
                        break
                    else:
                        entry_xml = "<entries>"
                        number_of_entries = 0
    version.import_completed_date = datetime.now()
    self.session.commit()
|
def optimize_forecasting_method(self, timeSeries, forecastingMethod):
    """Optimize the parameters for the given timeSeries and forecastingMethod.

    :param TimeSeries timeSeries: TimeSeries instance containing the original data.
    :param BaseForecastingMethod forecastingMethod: ForecastingMethod whose
        parameters are optimized.
    :return: A tuple containing the smallest BaseErrorMeasure instance, as
        defined in :py:meth:`BaseOptimizationMethod.__init__`, and the
        forecasting method's parameters.
    :rtype: tuple
    """
    # Enumerate every optimizable parameter together with all of its
    # candidate values.
    remainingParameters = [
        [parameter, list(self._generate_next_parameter_value(parameter, forecastingMethod))]
        for parameter in forecastingMethod.get_optimizable_parameters()
    ]
    # Collect the forecasting results for every parameter combination.
    forecastingResults = self.optimization_loop(timeSeries, forecastingMethod, remainingParameters)
    # Return the result whose error measure over the configured interval
    # is smallest.
    return min(
        forecastingResults,
        key=lambda item: item[0].get_error(self._startingPercentage, self._endPercentage),
    )
|
def get_records(self, name):
    """Return all the records stored for the given name in the cache.

    Args:
        name (string): The name which the required models are stored under.

    Returns:
        list: A list of :class:`cinder_data.model.CinderModel` models,
        empty if ``name`` is not cached.
    """
    if name in self._cache:
        # Materialize the dict view so callers always get a real list
        # (dict.values() is a lazy view in Python 3, not a list).
        return list(self._cache[name].values())
    return []
|
def _strip_leading_zeros(coeffs, threshold=_COEFFICIENT_THRESHOLD):
    r"""Strip leading zero coefficients from a polynomial.

    .. note::

        This assumes the polynomial :math:`f` defined by ``coeffs``
        has been normalized (via :func:`.normalize_polynomial`).

    Args:
        coeffs (numpy.ndarray): ``d + 1``-array of coefficients in monomial /
            power basis.
        threshold (Optional[float]): The point :math:`\tau` below which a
            coefficient will be considered to be numerically zero.

    Returns:
        numpy.ndarray: The same coefficients without any unnecessary zero
        terms (possibly empty if every coefficient is numerically zero).
    """
    # Guard on ``size`` so an all-(numerically-)zero input yields an empty
    # array instead of raising ``IndexError`` on ``coeffs[-1]``.
    while coeffs.size and np.abs(coeffs[-1]) < threshold:
        coeffs = coeffs[:-1]
    return coeffs
|
def _press_special_key(self, key, down):
    """Press or release a special (media) key on macOS.

    Builds a synthetic ``NSSystemDefined`` event carrying the media key code
    and posts it via Quartz.

    Source: http://stackoverflow.com/questions/11045814/emulate-media-key-press-on-mac

    :param key: symbolic key name, looked up in ``special_key_translate_table``.
    :param down: truthy for key-down, falsy for key-up.
    """
    key_code = special_key_translate_table[key]
    ev = NSEvent.otherEventWithType_location_modifierFlags_timestamp_windowNumber_context_subtype_data1_data2_(
        NSSystemDefined,  # type
        (0, 0),  # location
        0xa00 if down else 0xb00,  # flags
        0,  # timestamp
        0,  # window
        0,  # ctx
        8,  # subtype
        (key_code << 16) | ((0xa if down else 0xb) << 8),  # data1
        -1  # data2
    )
    # NOTE(review): ``ev.Quartz.CGEvent()`` looks suspicious -- the commonly
    # cited recipe calls ``ev.CGEvent()`` directly; confirm against PyObjC.
    Quartz.CGEventPost(0, ev.Quartz.CGEvent())
|
def filter_geometry(queryset, **filters):
    """Helper function for spatial lookup filters.

    Provide spatial lookup types as keywords without underscores instead of
    the usual "geometryfield__lookuptype" format.
    """
    geom_field_name = geo_field(queryset).name
    lookups = {}
    for lookup_type, geom in filters.items():
        # Rebuild the conventional "field__lookup" keyword for the ORM.
        lookups['%s__%s' % (geom_field_name, lookup_type)] = geom
    return queryset.filter(**lookups)
|
def check_name_collision(self, name, block_id, checked_ops):
    """Check whether any colliding names exist in this block.

    Sets the '__collided__' flag and related flags if so, so we don't commit them.
    Not called directly; called by the @state_create() decorator in
    blockstack.lib.operations.register.

    :param name: the name to check for collisions
    :param block_id: the block height being processed
    :param checked_ops: the operations already checked in this block
    :return: the result of the generic collision check for "name" records
    """
    # Delegate to the generic collision check for the "name" record type.
    return self.check_collision("name", name, block_id, checked_ops, OPCODE_NAME_STATE_CREATIONS)
|
def smooth_n_point(scalar_grid, n=5, passes=1):
    """Filter with normal distribution of weights.

    Parameters
    ----------
    scalar_grid : array-like or `pint.Quantity`
        Some 2D scalar grid to be smoothed.
    n : int
        The number of points to use in smoothing, only valid inputs
        are 5 and 9. Defaults to 5.
    passes : int
        The number of times to apply the filter to the grid. Defaults
        to 1.

    Returns
    -------
    array-like or `pint.Quantity`
        The filtered 2D scalar grid.

    Raises
    ------
    ValueError
        If ``n`` is neither 5 nor 9.

    Notes
    -----
    This function is a close replication of the GEMPAK functions SM5S
    and SM9S depending on the choice of the number of points to use
    for smoothing. This function can be applied multiple times to
    create a more smoothed field and will only smooth the interior
    points, leaving the end points with their original values. If a
    masked value or NaN values exists in the array, it will propagate
    to any point that uses that particular grid point in the smoothing
    calculation. Applying the smoothing function multiple times will
    propagate NaNs further throughout the domain.
    """
    # Weights: p for the center point, q for edge neighbors, r for corners.
    if n == 9:
        p, q, r = 0.25, 0.125, 0.0625
    elif n == 5:
        p, q, r = 0.5, 0.125, 0.0
    else:
        raise ValueError('The number of points to use in the smoothing '
                         'calculation must be either 5 or 9.')
    smooth_grid = scalar_grid[:].copy()
    for _i in range(passes):
        # Only interior points are updated; boundary values stay untouched.
        # (Fixed a stray unary "+" that used to sit in the corner-term sum.)
        smooth_grid[1:-1, 1:-1] = (p * smooth_grid[1:-1, 1:-1]
                                   + q * (smooth_grid[2:, 1:-1] + smooth_grid[1:-1, 2:]
                                          + smooth_grid[:-2, 1:-1] + smooth_grid[1:-1, :-2])
                                   + r * (smooth_grid[2:, 2:] + smooth_grid[2:, :-2]
                                          + smooth_grid[:-2, 2:] + smooth_grid[:-2, :-2]))
    return smooth_grid
|
def change_logger_levels(logger=None, level=logging.DEBUG):
    """Set the level of a logger and all of its attached handlers.

    :param logger: logging name or object to modify, defaults to root logger
    :param level: logging level to set (10=Debug, 20=Info, 30=Warn, 40=Error)
    """
    if not isinstance(logger, logging.Logger):
        # Accept a logger name (or None, which yields the root logger).
        logger = logging.getLogger(logger)
    logger.setLevel(level)
    for handler in logger.handlers:
        # Use the official setter; direct attribute assignment bypasses
        # logging's level normalization (e.g. string level names).
        handler.setLevel(level)
|
def cd_ctx(directory):
    """Context manager: store the current dir, change to ``directory``,
    and change back at the end -- even if the body raises.

    If ``directory`` is not an existing directory, the working directory is
    left unchanged (the body still runs).

    :param directory: directory to change into
    :return: generator (intended for use with ``contextlib.contextmanager``)
    """
    prevdir = os.path.abspath(os.curdir)
    if os.path.isdir(directory):
        os.chdir(directory)
    try:
        yield
    finally:
        # Restore the previous working directory even when the body raises;
        # this is a no-op if we never changed directory.
        os.chdir(prevdir)
|
def get_hierarchy_admin_session(self, proxy):
    """Gets the hierarchy administrative session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.hierarchy.HierarchyAdminSession) - a
            ``HierarchyAdminSession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_hierarchy_admin()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_hierarchy_admin()`` is ``true``.*
    """
    if self.supports_hierarchy_admin():
        # pylint: disable=no-member
        return sessions.HierarchyAdminSession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
def _get_fans ( shape ) :
r"""Returns the size of input dimension and output dimension , given ` shape ` .
Args :
shape : A list of integers .
Returns :
fan _ in : An int . The value of input dimension .
fan _ out : An int . The value of output dimension ."""
|
if len ( shape ) == 2 :
fan_in = shape [ 0 ]
fan_out = shape [ 1 ]
elif len ( shape ) == 4 or len ( shape ) == 5 : # assuming convolution kernels ( 2D or 3D ) .
kernel_size = np . prod ( shape [ : 2 ] )
fan_in = shape [ - 2 ] * kernel_size
fan_out = shape [ - 1 ] * kernel_size
else : # no specific assumptions
fan_in = np . sqrt ( np . prod ( shape ) )
fan_out = np . sqrt ( np . prod ( shape ) )
return fan_in , fan_out
|
def convertbinary(value, argument):
    """Convert a value to binary form or back.

    :type value: string
    :param value: The text or the binary text
    :type argument: string
    :param argument: The action to perform on the value. Can be "to" or "from".
    :raises ValueError: if ``argument`` is neither "to" nor "from".

    NOTE(review): despite the docstring calling ``value`` a string,
    ``bin(value)`` only accepts an integer, and ``format(value)`` is
    equivalent to ``str(value)`` (it does not decode binary) -- confirm the
    intended contract with callers.
    """
    if argument == 'to':
        return bin(value)
    elif argument == 'from':
        return format(value)
    raise ValueError("Invalid argument specified.")
|
def canonical_headers(self, headers_to_sign):
    """Return the headers that need to be included in the StringToSign.

    Headers are canonicalized by lower-casing and stripping each key,
    stripping each value, sorting the resulting "key:value" entries
    alphabetically, and joining them with newlines.
    """
    entries = sorted(
        '%s:%s' % (name.lower().strip(), headers_to_sign[name].strip())
        for name in headers_to_sign
    )
    return '\n'.join(entries)
|
def getAnalogID(self, num):
    """Return the COMTRADE ID of a given analog channel number.

    The number given is the same as in the COMTRADE header.
    """
    # Translate the header channel number into a list position,
    # then look up the matching channel ID.
    position = self.An.index(num)
    return self.Ach_id[position]
|
def import_tf_tensor(self, x, tf_x):
    """Import a tf.Tensor, producing a LaidOutTensor.

    Args:
        x: a Tensor (its shape drives how ``tf_x`` is sliced)
        tf_x: a tf.Tensor holding the actual values

    Returns:
        a LaidOutTensor
    """
    # Slice tf_x according to x's shape and wrap the slices in the
    # mesh-local laid-out representation.
    return self.LaidOutTensor(self.make_slices(tf_x, x.shape))
|
def virtual_resource(self):
    """Available on a Master Engine only.

    To get all virtual resources call::

        engine.virtual_resource.all()

    :raises UnsupportedEngineFeature: master engine only
    :rtype: CreateCollection(VirtualResource)
    """
    # Resolving the relation raises UnsupportedEngineFeature on engines
    # that are not master engines.
    relation = self.get_relation('virtual_resources', UnsupportedEngineFeature)
    collection = create_collection(relation, VirtualResource)
    collection._load_from_engine(self, 'virtualResources')
    return collection
|
def close(self):
    """Close the underlying hdf5 file.

    Flushes and closes the parent handle (when one is set) and then the
    hdf5 handle itself, resetting it to the empty-tuple sentinel.
    """
    # NOTE(review): ``()`` appears to serve as the "closed/absent" sentinel
    # for both handles -- confirm against the initializer.
    if self.parent != ():
        self.parent.flush()
        self.parent.close()
    if self.hdf5:  # is open
        self.hdf5.flush()
        self.hdf5.close()
    # Mark the handle as closed.
    self.hdf5 = ()
|
def _update_buffers(self, from_step, data, offset, is_last):
    """Push ``data`` into the buffers of every step fed by ``from_step``.

    If ``from_step`` is None the data is the pipeline's input data.
    """
    for target_step, target_buffer in self.target_buffers[from_step]:
        input_slot = 0
        # Reductions take several inputs; find which parent slot this
        # source step occupies so the input ordering is preserved.
        if isinstance(target_step, Reduction):
            input_slot = target_step.parents.index(from_step)
        target_buffer.update(data, offset, is_last, buffer_index=input_slot)
|
def _get_property_columns(tabletype, columns):
    """Return the real GPS columns required to read GPS properties of a table.

    :param tabletype: LIGO_LW table class whose row type declares the
        GPS properties.
    :param columns: iterable of property names to resolve.
    :returns: dict mapping each GPS property name to its
        ``(seconds_column, nanoseconds_column)`` name pair; non-GPS
        properties are silently skipped.

    Examples
    --------
    >>> _get_property_columns(lsctables.SnglBurstTable, ['peak'])
    {'peak': ('peak_time', 'peak_time_ns')}
    """
    from ligo.lw.lsctables import gpsproperty as GpsProperty
    # get properties for row object
    rowvars = vars(tabletype.RowType)
    # build list of real column names for fancy properties
    extracols = {}
    for key in columns:
        prop = rowvars[key]
        if isinstance(prop, GpsProperty):
            extracols[key] = (prop.s_name, prop.ns_name)
    return extracols
|
def __parse_domain_to_employer_line(self, raw_domain, raw_org):
    """Parse a domain-to-employer line into an (organization, domain) pair.

    :raises InvalidFormatError: when either field does not match its
        expected format.
    """
    domain_match = re.match(self.DOMAIN_REGEX, raw_domain, re.UNICODE)
    if not domain_match:
        raise InvalidFormatError(cause="invalid domain format: '%s'" % raw_domain)
    dom = domain_match.group('domain').strip()
    org_match = re.match(self.ORGANIZATION_REGEX, raw_org, re.UNICODE)
    if not org_match:
        raise InvalidFormatError(cause="invalid organization format: '%s'" % raw_org)
    org = org_match.group('organization').strip()
    # Normalize both fields to the parser's target encoding.
    org = self.__encode(org)
    dom = self.__encode(dom)
    return org, dom
|
def connectRoute(amp, router, receiver, protocol):
    """Connect the given receiver to a new box receiver for the given
    protocol.

    After connecting this router to an AMP server, use this method
    similarly to how you would use C{reactor.connectTCP} to establish a new
    connection to an HTTP, SMTP, or IRC server.

    @param receiver: An L{IBoxReceiver} which will be started when a route
        to a receiver for the given protocol is found.

    @param protocol: The name of a protocol which the AMP peer to which
        this router is connected has an L{IBoxReceiverFactory}.

    @return: A L{Deferred} which fires with C{receiver} when the route is
        established.
    """
    route = router.bindRoute(receiver)

    def _establish(result):
        # Wire the local route to the remote route name the peer allocated.
        route.connectTo(result['route'])
        return receiver

    deferred = amp.callRemote(Connect, origin=route.localRouteName, protocol=protocol)
    deferred.addCallback(_establish)
    return deferred
|
def get_residue_mapping(self):
    '''Return a mapping between the sequences ONLY IF there are exactly two.

    This restriction makes the code much simpler. Returns None when the
    number of sequences differs from two.
    '''
    if len(self.sequence_ids) == 2:
        # Run the alignment lazily if it has not been produced yet.
        if not self.alignment_output:
            self.align()
        assert (self.alignment_output)
        # NOTE(review): indices [1] and [2] suggest sequence_ids is 1-based
        # (e.g. a dict keyed 1..n) rather than a 0-based list -- confirm;
        # a plain 2-element list would raise IndexError here.
        return self._create_residue_map(self._get_alignment_lines(), self.sequence_ids[1], self.sequence_ids[2])
    else:
        return None
|
def _generate_data_for_format(fmt):
    """Generate a fake data dictionary to fill in the provided format string.

    Returns None when the pattern cannot be matched unambiguously (two
    adjacent wildcard fields) or a format spec is clearly invalid.
    """
    # finally try some data, create some random data for the fmt.
    data = {}
    # keep track of how many "free_size" (wildcard) parameters we have
    # if we get two in a row then we know the pattern is invalid, meaning
    # we'll never be able to match the second wildcard field
    free_size_start = False
    for literal_text, field_name, format_spec, conversion in formatter.parse(fmt):
        if literal_text:
            # A literal separator between fields resets the adjacency check.
            free_size_start = False
        if not field_name:
            free_size_start = False
            continue
        # encapsulating free size keys,
        # e.g. {:s}{:s} or {:s}{:4s}{:d}
        if not format_spec or format_spec == "s" or format_spec == "d":
            if free_size_start:
                # Two wildcard fields back to back: unmatchable pattern.
                return None
            else:
                free_size_start = True
        # make some data for this key and format
        if format_spec and '%' in format_spec:
            # some datetime
            t = dt.datetime.now()
            # run once through format to limit precision
            t = parse("{t:" + format_spec + "}", compose("{t:" + format_spec + "}", {'t': t}))['t']
            data[field_name] = t
        elif format_spec and 'd' in format_spec:
            # random number (with n sign. figures)
            if not format_spec.isalpha():
                n = _get_number_from_fmt(format_spec)
            else:
                # clearly bad
                return None
            data[field_name] = random.randint(0, 99999999999999999) % (10 ** n)
        else:
            # string type: random letters of the spec'd (or default 4) width
            if format_spec is None:
                n = 4
            elif format_spec.isalnum():
                n = _get_number_from_fmt(format_spec)
            else:
                n = 4
            randstri = ''
            for x in range(n):
                randstri += random.choice(string.ascii_letters)
            data[field_name] = randstri
    return data
|
def trace3D(self):
    """Give a 3D representation of the traceroute.

    right button: rotate the scene
    middle button: zoom
    left button: move the scene
    left button on a ball: toggle IP displaying
    ctrl-left button on a ball: scan ports 21,22,23,25,80 and 443 and display the result
    """
    trace = self.get_trace()
    import visual

    class IPsphere(visual.sphere):
        # Sphere representing one hop, with a toggleable IP label attached.
        def __init__(self, ip, **kargs):
            visual.sphere.__init__(self, **kargs)
            self.ip = ip
            self.label = None
            self.setlabel(self.ip)

        def setlabel(self, txt, visible=None):
            # Replace the current label, preserving visibility unless overridden.
            if self.label is not None:
                if visible is None:
                    visible = self.label.visible
                self.label.visible = 0
            elif visible is None:
                visible = 0
            self.label = visual.label(text=txt, pos=self.pos, space=self.radius, xoffset=10, yoffset=20, visible=visible)

        def action(self):
            # Toggle label visibility on click.
            self.label.visible ^= 1

    visual.scene = visual.display()
    visual.scene.exit = True
    start = visual.box()
    rings = {}
    tr3d = {}
    # Build one "ring" of distinct hosts per TTL; remember, for each traced
    # target, its index into every ring it passes through.
    for i in trace:
        tr = trace[i]
        tr3d[i] = []
        ttl = tr.keys()
        for t in range(1, max(ttl) + 1):
            if t not in rings:
                rings[t] = []
            if t in tr:
                if tr[t] not in rings[t]:
                    rings[t].append(tr[t])
                tr3d[i].append(rings[t].index(tr[t]))
            else:
                # Unknown hop at this TTL: placeholder entry.
                rings[t].append(("unk", -1))
                tr3d[i].append(len(rings[t]) - 1)
    # Lay each ring's hosts out on a circle at height 2*t, colored by status.
    for t in rings:
        r = rings[t]
        l = len(r)
        for i in range(l):
            if r[i][1] == -1:
                col = (0.75, 0.75, 0.75)
            elif r[i][1]:
                col = visual.color.green
            else:
                col = visual.color.blue
            s = IPsphere(pos=((l - 1) * visual.cos(2 * i * visual.pi / l), (l - 1) * visual.sin(2 * i * visual.pi / l), 2 * t), ip=r[i][0], color=col)
            # Replace ring indices with the actual sphere objects.
            for trlst in tr3d.values():
                if t <= len(trlst):
                    if trlst[t - 1] == i:
                        trlst[t - 1] = s
    # Draw one colored polyline (as cylinders) per traced target.
    forecol = colgen(0.625, 0.4375, 0.25, 0.125)
    for trlst in tr3d.values():
        col = next(forecol)
        start = (0, 0, 0)
        for ip in trlst:
            visual.cylinder(pos=start, axis=ip.pos - start, color=col, radius=0.2)
            start = ip.pos

    movcenter = None
    # Interaction loop: esc/q quits; left-click toggles labels; ctrl-left-click
    # scans common TCP ports on the picked host; drag moves the scene.
    while 1:
        visual.rate(50)
        if visual.scene.kb.keys:
            k = visual.scene.kb.getkey()
            if k == "esc" or k == "q":
                break
        if visual.scene.mouse.events:
            ev = visual.scene.mouse.getevent()
            if ev.press == "left":
                o = ev.pick
                if o:
                    if ev.ctrl:
                        if o.ip == "unk":
                            continue
                        # Flash the sphere red while the scan runs.
                        savcolor = o.color
                        o.color = (1, 0, 0)
                        a, _ = sr(IP(dst=o.ip) / TCP(dport=[21, 22, 23, 25, 80, 443]), timeout=2)
                        o.color = savcolor
                        if len(a) == 0:
                            txt = "%s:\nno results" % o.ip
                        else:
                            txt = "%s:\n" % o.ip
                            for s, r in a:
                                txt += r.sprintf("{TCP:%IP.src%:%TCP.sport% %TCP.flags%}{TCPerror:%IPerror.dst%:%TCPerror.dport% %IP.src% %ir,ICMP.type%}\n")
                        o.setlabel(txt, visible=1)
                    else:
                        if hasattr(o, "action"):
                            o.action()
            elif ev.drag == "left":
                movcenter = ev.pos
            elif ev.drop == "left":
                movcenter = None
        if movcenter:
            visual.scene.center -= visual.scene.mouse.pos - movcenter
            movcenter = visual.scene.mouse.pos
|
def error_and_result(f):
    """Format a task result into the json dictionary ``{'data': value}`` when
    the task ran without raising an exception. If an exception was raised
    during the task execution, format the result into the dictionary
    ``{'error': exception message with traceback}``.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Delegate the actual formatting/error capture to the shared helper.
        return error_and_result_decorator_inner_fn(f, False, *args, **kwargs)
    return wrapper
|
async def close(self) -> None:
    """Explicit exit. If so configured, populate cache to prove for any creds on schemata,
    cred defs, and rev regs marked of interest in configuration at initialization,
    archive cache, and purge prior cache archives.

    :return: current object
    """
    LOGGER.debug('OrgHubAnchor.close >>>')
    archive_caches = False
    # Populate the holder-prover cache (non-archiving load) if configured.
    if self.config.get('archive-holder-prover-caches-on-close', False):
        archive_caches = True
        await self.load_cache_for_proof(False)
    # Any truthy config value triggers the verifier cache load as well.
    if self.config.get('archive-verifier-caches-on-close', {}):
        archive_caches = True
        await self.load_cache_for_verification(False)
    if archive_caches:
        # Write freshly loaded caches to disk and drop older archives.
        ArchivableCaches.archive(self.dir_cache)
        ArchivableCaches.purge_archives(self.dir_cache, True)
    # Do not close wallet independently: allow for sharing open wallet over many anchor lifetimes
    # await self.wallet.close()  # 1.7.8
    # Do not close pool independently: let relying party decide when to go on-line and off-line
    for path_rr_id in Tails.links(self._dir_tails):
        rr_id = basename(path_rr_id)
        try:
            # Best effort: sync revocation data for each tails link.
            await HolderProver._sync_revoc_for_proof(self, rr_id)
        except ClosedPool:
            LOGGER.warning('OrgHubAnchor sync-revoc on close required ledger for %s but pool was closed', rr_id)
    LOGGER.debug('OrgHubAnchor.close <<<')
|
def duplicateAnalysis(analysis):
    """Duplicate an analysis: create a new analysis with the same analysis
    service for the same sample. Used to reduce the error procedure
    probability, because both results must be similar.

    :base: the analysis object used as the creation base.
    """
    request = analysis.aq_parent
    keyword = analysis.getKeyword()
    # Rename the analysis to make way for it's successor.
    # Support multiple duplicates by renaming to *-0, *-1, etc
    existing = [an for an in request.objectValues("Analysis") if an.getId().startswith(keyword)]
    duplicate_id = "{0}-{1}".format(keyword, len(existing))
    return create_analysis(request, analysis, id=duplicate_id, Retested=True)
|
def set_dynamic_settings(s):
    """Called at the end of the project's settings module, and is passed
    its globals dict for updating with some final tweaks for settings
    that generally aren't specified, but can be given some better
    defaults based on other settings that have been specified. Broken
    out into its own function so that the code need not be replicated
    in the settings modules of other project-based apps that leverage
    yacms's settings module.

    :param s: the settings module's globals dict, mutated in place.
    """
    # Moves an existing list setting value to a different position.
    move = lambda n, k, i: s[n].insert(i, s[n].pop(s[n].index(k)))
    # Add a value to the end of a list setting if not in the list.
    append = lambda n, k: s[n].append(k) if k not in s[n] else None
    # Add a value to the start of a list setting if not in the list.
    prepend = lambda n, k: s[n].insert(0, k) if k not in s[n] else None
    # Remove a value from a list setting if in the list.
    remove = lambda n, k: s[n].remove(k) if k in s[n] else None

    # Django 1.10 middleware compatibility
    MIDDLEWARE_SETTING_NAME = 'MIDDLEWARE' if 'MIDDLEWARE' in s and s['MIDDLEWARE'] is not None else 'MIDDLEWARE_CLASSES'

    if not s.get("ALLOWED_HOSTS", []):
        warn("You haven't defined the ALLOWED_HOSTS settings, which "
             "Django requires. Will fall back to the domains "
             "configured as sites.")
        s["ALLOWED_HOSTS"] = SitesAllowedHosts()

    if s.get("TIME_ZONE", None) is None:
        tz = get_best_local_timezone()
        s["TIME_ZONE"] = tz
        warn("TIME_ZONE setting is not set, using closest match: %s" % tz)

    # Define some settings based on management command being run.
    management_command = sys.argv[1] if len(sys.argv) > 1 else ""
    # Some kind of testing is running via test or testserver.
    s["TESTING"] = management_command in ("test", "testserver")
    # Some kind of development server is running via runserver,
    # runserver_plus or harvest (lettuce)
    s["DEV_SERVER"] = management_command.startswith(("runserver", "harvest"))

    # Change tuple settings to lists for easier manipulation.
    s.setdefault("AUTHENTICATION_BACKENDS", defaults.AUTHENTICATION_BACKENDS)
    s.setdefault("STATICFILES_FINDERS", defaults.STATICFILES_FINDERS)
    tuple_list_settings = ["AUTHENTICATION_BACKENDS", "INSTALLED_APPS",
                           MIDDLEWARE_SETTING_NAME, "STATICFILES_FINDERS",
                           "LANGUAGES", "TEMPLATE_CONTEXT_PROCESSORS"]
    # Iterate a copy: the list is mutated inside the loop.
    for setting in tuple_list_settings[:]:
        if not isinstance(s.get(setting, []), list):
            s[setting] = list(s[setting])
        else:
            # Setting is already a list, so we'll exclude it from
            # the list of settings we'll revert back to tuples.
            tuple_list_settings.remove(setting)

    # Set up cookie messaging if none defined.
    storage = "django.contrib.messages.storage.cookie.CookieStorage"
    s.setdefault("MESSAGE_STORAGE", storage)

    # If required, add django-modeltranslation for both tests and deployment
    if not s.get("USE_MODELTRANSLATION", False) or s["TESTING"]:
        s["USE_MODELTRANSLATION"] = False
        remove("INSTALLED_APPS", "modeltranslation")
    else:
        try:
            __import__("modeltranslation")
        except ImportError:
            # django-modeltranslation is not installed, remove setting so
            # admin won't try to import it
            s["USE_MODELTRANSLATION"] = False
            remove("INSTALLED_APPS", "modeltranslation")
            # NOTE(review): "USE_MODETRANSLATION" in the message below looks
            # like a typo for USE_MODELTRANSLATION (runtime string left as-is).
            warn("USE_MODETRANSLATION setting is set to True but django-"
                 "modeltranslation is not installed. Disabling it.")
        else:
            # Force i18n so we are assured that modeltranslation is active
            s["USE_I18N"] = True
            append("INSTALLED_APPS", "modeltranslation")

    # Setup for optional apps.
    optional = list(s.get("OPTIONAL_APPS", []))
    for app in optional:
        if app not in s["INSTALLED_APPS"]:
            try:
                __import__(app)
            except ImportError:
                pass
            else:
                s["INSTALLED_APPS"].append(app)

    if s["TESTING"]:
        # Triggers interactive superuser creation and some pyc/pyo tests
        # fail with standard permissions.
        remove("INSTALLED_APPS", "django_extensions")

    if "debug_toolbar" in s["INSTALLED_APPS"]:
        # We need to configure debug_toolbar manually otherwise it
        # breaks in conjunction with modeltranslation. See the
        # "Explicit setup" section in debug_toolbar docs for more info.
        s["DEBUG_TOOLBAR_PATCH_SETTINGS"] = False
        debug_mw = "debug_toolbar.middleware.DebugToolbarMiddleware"
        append(MIDDLEWARE_SETTING_NAME, debug_mw)
        s.setdefault("INTERNAL_IPS", ("127.0.0.1",))

    # If compressor installed, ensure it's configured and make
    # yacms's settings available to its offline context,
    # since jQuery is configured via a setting.
    if "compressor" in s["INSTALLED_APPS"]:
        append("STATICFILES_FINDERS", "compressor.finders.CompressorFinder")
        s.setdefault("COMPRESS_OFFLINE_CONTEXT", {
            "MEDIA_URL": s.get("MEDIA_URL", ""),
            "STATIC_URL": s.get("STATIC_URL", ""),
        })

        def yacms_settings():
            # Lazy import so compressor's offline context resolves settings
            # at render time, not at configuration time.
            from yacms.conf import settings
            return settings
        s["COMPRESS_OFFLINE_CONTEXT"]["settings"] = yacms_settings

    # Ensure the yacms auth backend is enabled if
    # yacms.accounts is being used.
    if "yacms.accounts" in s["INSTALLED_APPS"]:
        auth_backend = "yacms.core.auth_backends.YaCmsBackend"
        s.setdefault("AUTHENTICATION_BACKENDS", [])
        prepend("AUTHENTICATION_BACKENDS", auth_backend)

    # Ensure Grappelli is after yacms in app order so that
    # admin templates are loaded in the correct order.
    grappelli_name = s.get("PACKAGE_NAME_GRAPPELLI")
    try:
        move("INSTALLED_APPS", grappelli_name, len(s["INSTALLED_APPS"]))
    except ValueError:
        s["GRAPPELLI_INSTALLED"] = False
    else:
        s["GRAPPELLI_INSTALLED"] = True

    # Ensure admin is at the bottom of the app order so that admin
    # templates are loaded in the correct order, and that staticfiles
    # is also at the end so its runserver can be overridden.
    for app in ["django.contrib.admin", "django.contrib.staticfiles"]:
        try:
            move("INSTALLED_APPS", app, len(s["INSTALLED_APPS"]))
        except ValueError:
            pass

    # Add missing apps if existing apps depend on them.
    if "yacms.blog" in s["INSTALLED_APPS"]:
        append("INSTALLED_APPS", "yacms.generic")
    if "yacms.generic" in s["INSTALLED_APPS"]:
        s.setdefault("COMMENTS_APP", "yacms.generic")
        append("INSTALLED_APPS", "django_comments")

    # Ensure yacms.boot is first.
    try:
        move("INSTALLED_APPS", "yacms.boot", 0)
    except ValueError:
        pass

    # Remove caching middleware if no backend defined.
    if not (s.get("CACHE_BACKEND") or s.get("CACHES")):
        s[MIDDLEWARE_SETTING_NAME] = [
            mw for mw in s[MIDDLEWARE_SETTING_NAME]
            if not (mw.endswith("UpdateCacheMiddleware") or
                    mw.endswith("FetchFromCacheMiddleware"))
        ]

    # If only LANGUAGE_CODE has been defined, ensure the other required
    # settings for translations are configured.
    if (s.get("LANGUAGE_CODE") and len(s.get("LANGUAGES", [])) == 1 and
            s["LANGUAGE_CODE"] != s["LANGUAGES"][0][0]):
        s["USE_I18N"] = True
        s["LANGUAGES"] = [(s["LANGUAGE_CODE"], "")]

    # Ensure required middleware is installed, otherwise admin
    # becomes inaccessible.
    mw = "django.middleware.locale.LocaleMiddleware"
    if s["USE_I18N"] and mw not in s[MIDDLEWARE_SETTING_NAME]:
        # Locale middleware must come right after SessionMiddleware.
        session = s[MIDDLEWARE_SETTING_NAME].index("django.contrib.sessions.middleware.SessionMiddleware")
        s[MIDDLEWARE_SETTING_NAME].insert(session + 1, mw)

    # Revert tuple settings back to tuples.
    for setting in tuple_list_settings:
        s[setting] = tuple(s[setting])

    # Some settings tweaks for different DB engines.
    for (key, db) in s["DATABASES"].items():
        shortname = db["ENGINE"].split(".")[-1]
        if shortname == "sqlite3":
            # If the Sqlite DB name doesn't contain a path, assume
            # it's in the project directory and add the path to it.
            if "NAME" in db and os.sep not in db["NAME"]:
                db_path = os.path.join(s.get("PROJECT_ROOT", ""), db["NAME"])
                db["NAME"] = db_path
        elif shortname == "mysql":
            # Required MySQL collation for tests.
            db.setdefault("TEST", {})["COLLATION"] = "utf8_general_ci"
|
def DocInheritMeta(style="parent", abstract_base_class=False):
    """Return a metaclass that merges docstrings of a parent class and its children.

    Docstrings are merged for the class itself and for its attributes
    (properties, methods, classmethods, staticmethods, decorated methods).

    Parameters
    ----------
    style : Union[Any, Callable[[str, str], str]], optional (default: "parent")
        A valid inheritance-scheme style ID or a function that merges two
        docstrings.
    abstract_base_class : bool, optional (default: False)
        If True, the returned metaclass inherits from abc.ABCMeta, so classes
        deriving from ``DocInheritMeta(style=..., abstract_base_class=True)``
        are abstract base classes whose subclasses inherit docstrings using
        the chosen scheme.

    Returns
    -------
    custom_inherit.DocInheritorBase
    """
    merge_func = store[style]
    # Build a fresh subclass rather than assigning onto _DocInheritorBase:
    # mutating the shared base would retroactively change the merge style of
    # every metaclass previously returned by this factory.
    metaclass = type(
        "DocInheritorMeta",
        (_DocInheritorBase,),
        {
            "class_doc_inherit": staticmethod(merge_func),
            "attr_doc_inherit": staticmethod(merge_func),
        },
    )
    if abstract_base_class:
        metaclass = type("abc" + metaclass.__name__, (_ABCMeta, metaclass), {})
    return metaclass
|
def insert(self, space_no, *args):
    """Insert a tuple into space *space_no*.

    The server returns an error if the primary key already exists.
    """
    deferred = self.replyQueue.get()
    request = RequestInsert(self.charset, self.errors, deferred._ipro_request_id,
                            space_no, Request.TNT_FLAG_ADD, *args)
    self.transport.write(bytes(request))
    return deferred.addCallback(self.handle_reply, self.charset, self.errors, None)
|
def every(delay, task, name):
    """Run *task* repeatedly, once every *delay* seconds.

    :param delay: interval between runs, in seconds
    :param task: callable invoked on each tick; returning False stops the loop
    :param name: label included in the log message when *task* raises
    :return: None
    """
    deadline = time.time() + delay
    while True:
        time.sleep(max(0, deadline - time.time()))
        try:
            if task() is False:
                break
        except Exception:
            logger.debug("Problem while executing repetitive task: %s" % name, exc_info=True)
        # Skip missed ticks so the schedule stays phase-aligned instead of
        # running a burst of catch-up iterations when we fall behind.
        deadline += (time.time() - deadline) // delay * delay + delay
|
def led_rgb(self, color):
    """Set the device LED to the given (red, green, blue) color.

    :param color: list or tuple of three ints (R, G, B)
    :raises SkybellException: if *color* is not a list/tuple of ints
    """
    is_valid = isinstance(color, (list, tuple)) and all(
        isinstance(component, int) for component in color)
    if not is_valid:
        raise SkybellException(ERROR.COLOR_VALUE_NOT_VALID, color)
    red, green, blue = color[0], color[1], color[2]
    self._set_setting({
        CONST.SETTINGS_LED_R: red,
        CONST.SETTINGS_LED_G: green,
        CONST.SETTINGS_LED_B: blue,
    })
|
def get_albums(self, search, start=0, max_items=100):
    """Search the music service for albums.

    Delegates to ``get_music_service_information``; see that method for a
    description of the arguments.
    """
    return self.get_music_service_information('albums', search, start, max_items)
|
def cache_files(db, aid: int, anime_files: AnimeFiles) -> None:
    """Cache files for anime.

    Refreshes the cache status row for *aid*, then stores the JSON-serialized
    file list in ``cache_anime.anime_files``.  Both statements run in one
    transaction via the connection's context manager.
    """
    with db:
        # Ensure the cache row for this aid exists / is marked current first.
        cache_status(db, aid)
        db.cursor().execute("""UPDATE cache_anime
            SET anime_files=?
            WHERE aid=?""", (anime_files.to_json(), aid))
|
def use_comparative_proficiency_view(self):
    """Pass through to provider ProficiencyLookupSession.use_comparative_proficiency_view"""
    self._object_views['proficiency'] = COMPARATIVE
    # self._get_provider_session('proficiency_lookup_session')  # To make sure the session is tracked
    for provider_session in self._get_provider_sessions():
        try:
            provider_session.use_comparative_proficiency_view()
        except AttributeError:
            # This provider session does not support the view; skip it.
            pass
|
def properties_from_mapping(self, bt_addr):
    """Return the (namespace, instance) properties stored for *bt_addr*.

    :param bt_addr: Bluetooth address to look up
    :return: the registered properties, or None when the address is unknown
    """
    matches = (props for addr, props in self.eddystone_mappings if addr == bt_addr)
    return next(matches, None)
|
def plot(self, size=1):
    """Display this palette as a single horizontal strip of colors.

    Modeled on seaborn's ``palplot``.

    Parameters
    ----------
    size : int
        Scaling factor for the figure size.
    """
    count = len(self)
    fig, ax = plt.subplots(1, 1, figsize=(count * size, size))
    cmap = mpl.colors.ListedColormap(list(self))
    ax.imshow(np.arange(count).reshape(1, count),
              cmap=cmap, interpolation="nearest", aspect="auto")
    # Keep tick positions on cell boundaries but hide the labels.
    ax.set_xticks(np.arange(count) - .5)
    ax.set_yticks([-.5, .5])
    ax.set_xticklabels([])
    ax.set_yticklabels([])
|
def toggle_item(self, item, test_func, field_name=None):
    """Add or remove *item* from this section based on ``test_func``.

    ``test_func`` receives *item* and returns a boolean: True means the item
    is added to the section, False means it is removed.

    Intended for items of ``settings.ARMSTRONG_SECTION_ITEM_MODEL``; behavior
    on other items is undefined.

    :return: True if the item was added, False if it was removed
    """
    should_include = bool(test_func(item))
    if should_include:
        self.add_item(item, field_name)
    else:
        self.remove_item(item, field_name)
    return should_include
|
def get_chat_members_count(self, chat_id: Union[int, str]) -> int:
    """Return the number of members in a chat.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.

    Returns:
        The participant count as an integer.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ValueError`` if *chat_id* belongs to a user.
    """
    peer = self.resolve_peer(chat_id)
    if isinstance(peer, types.InputPeerChannel):
        full = self.send(functions.channels.GetFullChannel(channel=peer))
        return full.full_chat.participants_count
    if isinstance(peer, types.InputPeerChat):
        chats = self.send(functions.messages.GetChats(id=[peer.chat_id])).chats
        return chats[0].participants_count
    raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))
|
def get_bank_lookup_session(self):
    """Return the ``OsidSession`` associated with the bank lookup service.

    return: (osid.assessment.BankLookupSession) - a ``BankLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_bank_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_bank_lookup()`` is true.*
    """
    if self.supports_bank_lookup():
        # pylint: disable=no-member
        return sessions.BankLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()
|
def delete_observation(observation_id: int, access_token: str) -> List[Dict[str, Any]]:
    """Delete an observation from iNaturalist.

    :param observation_id: identifier of the observation to delete
    :param access_token: OAuth token used for authentication
    :return: parsed JSON body of the response
    """
    headers = _build_auth_header(access_token)
    headers['Content-type'] = 'application/json'
    url = "{base_url}/observations/{id}.json".format(base_url=INAT_BASE_URL, id=observation_id)
    response = requests.delete(url=url, headers=headers)
    response.raise_for_status()
    # The API is documented to return JSON, but the success body is currently
    # empty, so json() may raise a JSONDecode exception.
    # TODO: report to iNaturalist team if the issue persists
    return response.json()
|
def first_and_second_harmonic_function(phi, c):
    """Evaluate the combined first- and second-order harmonic function.

    Used to compute the corrections for ellipse fitting:

    .. math::

        f(phi) = c[0] + c[1]*\\sin(phi) + c[2]*\\cos(phi) +
                 c[3]*\\sin(2*phi) + c[4]*\\cos(2*phi)

    Parameters
    ----------
    phi : float or `~numpy.ndarray`
        Angle(s) along the elliptical path, going towards the positive y
        axis, measured from the semimajor axis lying in the positive x
        quadrant.
    c : `~numpy.ndarray` of shape (5,)
        The five harmonic coefficients.

    Returns
    -------
    float or `~numpy.ndarray`
        Function value(s) at the given angle(s).
    """
    sin1, cos1 = np.sin(phi), np.cos(phi)
    sin2, cos2 = np.sin(2 * phi), np.cos(2 * phi)
    return c[0] + c[1] * sin1 + c[2] * cos1 + c[3] * sin2 + c[4] * cos2
|
def setValue(self, value):
    """Cast *value* to the proper Python type for this parameter.

    The target type is selected from the parameter's name via the
    ``_CUSTOM_*_PARAMS`` collections; the converted value is stored in
    ``self._value``.  Unrecognized names keep the value unchanged.
    """
    if self.name in self._CUSTOM_INT_PARAMS:
        value = int(value)
    elif self.name in self._CUSTOM_FLOAT_PARAMS:
        value = float(value)
    elif self.name in self._CUSTOM_BOOL_PARAMS:
        value = bool(value)
    elif self.name in self._CUSTOM_INTLIST_PARAMS:
        value = readIntlist(value)
    elif self.name in self._CUSTOM_DICT_PARAMS:
        parser = Parser()
        value = parser.parse(value)
    elif self.name == "note":
        # Bug fix: `unicode` does not exist on Python 3; str() is the
        # equivalent text type there.
        value = str(value)
    self._value = value
|
def upload(ctx, yes=False):
    """Upload the package to PyPI, then tag and push the release."""
    import callee
    version = callee.__version__
    # A development version must never be published.
    # TODO: add a 'release' to automatically bless a version as release one
    if version.endswith('-dev'):
        fatal("Can't upload a development version (%s) to PyPI!", version)
    # Ask for confirmation unless it was supplied up front.
    if not yes:
        reply = input("Do you really want to upload to PyPI [y/N]? ")
        yes = reply.strip().lower() == 'y'
    if not yes:
        logging.warning("Aborted -- not uploading to PyPI.")
        return -2
    logging.debug("Uploading version %s to PyPI...", version)
    upload_result = ctx.run('python setup.py sdist upload')
    if not upload_result.ok:
        fatal("Failed to upload version %s to PyPI!", version, cause=upload_result)
    logging.info("PyPI upload completed successfully.")
    # Tag the release in Git and push everything upstream.
    tag_result = ctx.run('git tag %s' % version)
    if not tag_result.ok:
        fatal("Failed to add a Git tag for uploaded version %s", version, cause=tag_result)
    push_result = ctx.run('git push && git push --tags')
    if not push_result.ok:
        fatal("Failed to push the release upstream.", cause=push_result)
|
def isset(name):
    """Decorator factory: run the wrapped function only if env var *name* is set.

    Args:
        name: The name of the environment variable.

    Returns:
        The wrapped function's return value, or `None` if it was skipped.
    """
    def wrapped(func):
        @functools.wraps(func)
        def _decorator(*args, **kwargs):
            if not core.isset(name):
                return None
            return func(*args, **kwargs)
        return _decorator
    return wrapped
|
def delete_project(self, owner, id, **kwargs):
    """Delete a project.

    Permanently deletes a project and all data associated with it.  This
    cannot be undone, although a new project may be created with the same id.

    The request is synchronous by default; pass a ``callback`` function in
    *kwargs* to receive the response asynchronously (the request thread is
    returned in that case).

    :param str owner: user name / unique identifier of the project creator
    :param str id: project unique identifier
    :param callback function: optional callback for an asynchronous request
    :return: SuccessMessage, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths issue the exact same delegated call and
    # return its result, so a single call covers both.
    return self.delete_project_with_http_info(owner, id, **kwargs)
|
def is_sleep(key):
    """Return True when *key* is still inside its error back-off window.

    A key enters the window after an error; while inside it, callers should
    serve cached data instead of retrying.
    """
    with lock:
        if key not in sleep_record:
            return False
        return time.time() < sleep_record[key]
|
def _initialize(self):
    """Initialize the binary for reassembly.

    Computes section alignments, builds the CFG, creates Procedure and Data
    entries from it, refines overlapping/unknown data items, and filters out
    GCC-specific sections.

    :return: None
    """
    # figure out section alignments
    for section in self.project.loader.main_object.sections:
        # only consider sections that are mapped into some segment
        in_segment = False
        for segment in self.project.loader.main_object.segments:
            segment_addr = segment.vaddr
            if segment_addr <= section.vaddr < segment_addr + segment.memsize:
                in_segment = True
                break
        if not in_segment:
            continue
        # calculate alignments (largest power-of-two divisor of the vaddr, capped at 0x20)
        if section.vaddr % 0x20 == 0:
            alignment = 0x20
        elif section.vaddr % 0x10 == 0:
            alignment = 0x10
        elif section.vaddr % 0x8 == 0:
            alignment = 0x8
        elif section.vaddr % 0x4 == 0:
            alignment = 0x4
        else:
            alignment = 2
        self._section_alignments[section.name] = alignment

    l.debug('Generating CFG...')
    cfg = self.project.analyses.CFG(normalize=True,
                                    resolve_indirect_jumps=True,
                                    collect_data_references=True,
                                    extra_memory_regions=[(0x4347c000, 0x4347c000 + 0x1000)],
                                    data_type_guessing_handlers=[
                                        self._sequence_handler,
                                        self._cgc_extended_application_handler,
                                        self._unknown_data_size_handler,
                                    ],
                                    )
    self.cfg = cfg

    # remember the current capstone syntax so it can be restored at the end
    old_capstone_syntax = self.project.arch.capstone_x86_syntax
    if old_capstone_syntax is None:
        old_capstone_syntax = 'intel'

    if self.syntax == 'at&t':
        # switch capstone to AT&T style
        self.project.arch.capstone_x86_syntax = "at&t"
        # clear the block cache in lifter!
        self.project.factory.default_engine.clear_cache()

    # initialize symbol manager
    self.symbol_manager = SymbolManager(self, cfg)

    # collect address of all instructions
    l.debug('Collecting instruction addresses...')
    for cfg_node in self.cfg.nodes():
        self.all_insn_addrs |= set(cfg_node.instruction_addrs)

    # Functions
    l.debug('Creating functions...')
    for f in cfg.kb.functions.values():
        # Skip all SimProcedures
        if self.project.is_hooked(f.addr):
            continue
        elif self.project.simos.is_syscall_addr(f.addr):
            continue
        # Check which section the start address belongs to
        section = next(iter(sec.name for sec in self.project.loader.main_object.sections
                            if f.addr >= sec.vaddr and f.addr < sec.vaddr + sec.memsize),
                       ".text")
        if section in ('.got', '.plt', 'init', 'fini'):
            continue
        procedure = Procedure(self, f, section=section)
        self.procedures.append(procedure)
    self.procedures = sorted(self.procedures, key=lambda x: x.addr)

    # Data
    has_sections = len(self.project.loader.main_object.sections) > 0
    l.debug('Creating data entries...')
    for addr, memory_data in cfg._memory_data.items():
        if memory_data.sort in ('code reference', ):
            continue
        if memory_data.sort == 'string':
            # it might be the CGC package list
            new_sort, new_size = self._cgc_package_list_identifier(memory_data.address, memory_data.size)
            if new_sort is not None:
                # oh we got it!
                memory_data = memory_data.copy()
                memory_data.sort = new_sort
        if has_sections:
            # Check which section the start address belongs to
            section = next(iter(sec for sec in self.project.loader.main_object.sections
                                if sec.vaddr <= addr < sec.vaddr + sec.memsize),
                           None)
            if section is not None and section.name not in ('.note.gnu.build-id', ):
                # ignore certain section names
                data = Data(self, memory_data, section=section)
                self.data.append(data)
            elif memory_data.sort == 'segment-boundary':
                # it just points to the end of the segment or a section
                section = next(iter(sec for sec in self.project.loader.main_object.sections
                                    if addr == sec.vaddr + sec.memsize),
                               None)
                if section is not None:
                    data = Data(self, memory_data, section=section)
                    self.data.append(data)
            else:
                # data = Data(self, memory_data, section_name='.data')
                # the data is not really within any existing section. weird. ignored it.
                pass
        else:
            # the binary does not have any section
            # we use segment information instead
            # TODO: this logic needs reviewing
            segment = next(iter(seg for seg in self.project.loader.main_object.segments
                                if seg.vaddr <= addr <= seg.vaddr + seg.memsize),
                           None)
            if segment is not None:
                data = Data(self, memory_data, section_name='.data')
                self.data.append(data)

    # remove all data that belong to GCC-specific sections
    section_names_to_ignore = {'.init', '.fini', '.fini_array', '.jcr', '.dynamic', '.got', '.got.plt',
                               '.eh_frame_hdr', '.eh_frame', '.rel.dyn', '.rel.plt', '.rela.dyn', '.rela.plt',
                               '.dynstr', '.dynsym', '.interp', '.note.ABI-tag', '.note.gnu.build-id',
                               '.gnu.hash', '.gnu.version', '.gnu.version_r'}

    # make sure there are always memory data entries pointing at the end of sections
    all_data_addrs = set(d.addr for d in self.data)
    all_procedure_addrs = set(f.addr for f in self.procedures)
    all_addrs = all_data_addrs | all_procedure_addrs

    if has_sections:
        for section in self.project.loader.main_object.sections:
            if section.name in section_names_to_ignore:
                # skip all sections that are CGC specific
                continue

            # make sure this section is inside a segment
            for segment in self.project.loader.main_object.segments:
                segment_start = segment.vaddr
                segment_end = segment_start + segment.memsize
                if segment_start <= section.vaddr < segment_end:
                    break
            else:
                # this section is not mapped into memory
                continue

            section_boundary_addr = section.vaddr + section.memsize
            if section_boundary_addr not in all_addrs:
                data = Data(self, addr=section_boundary_addr, size=0, sort='segment-boundary',
                            section_name=section.name)
                self.data.append(data)
                # add the address to all_data_addrs so we don't end up adding another boundary in
                all_data_addrs.add(section_boundary_addr)

    self.data = sorted(self.data, key=lambda x: x.addr)

    data_indices_to_remove = set()

    # Go through data entry list and refine them
    for i, data in enumerate(self.data):
        if i in data_indices_to_remove:
            continue
        # process the overlapping ones
        if i < len(self.data) - 1:
            if data.addr + data.size > self.data[i + 1].addr:
                # they are overlapping :-(
                # TODO: make sure new_size makes sense
                new_size = self.data[i + 1].addr - data.addr
                # there are cases that legit data is misclassified as pointers
                # we are able to detect some of them here
                if data.sort == 'pointer-array':
                    pointer_size = self.project.arch.bytes
                    if new_size % pointer_size != 0:
                        # the self.data[i + 1] cannot be pointed to by a pointer
                        # remove that guy later
                        data_indices_to_remove.add(i + 1)
                        # mark the source as a non-pointer
                        # apparently the original Reassembleable Disassembler paper cannot get this case
                        source_addr = self.data[i + 1].memory_data.pointer_addr
                        if source_addr is not None:
                            # find the original data
                            original_data = next((d for d in self.data
                                                  if d.addr <= source_addr < d.addr + d.size),
                                                 None)
                            if original_data is not None:
                                original_data.desymbolize()
                        continue
                data.shrink(new_size)
        # process those ones whose type is unknown
        if data.sort == 'unknown' and data.size == 0:
            # increase its size until reaching the next item
            if i + 1 == len(self.data):
                if data.section is None:
                    continue
                data.size = data.section.vaddr + data.section.memsize - data.addr
            else:
                data.size = self.data[i + 1].addr - data.addr

    # drop removed entries from the highest index down so indices stay valid
    for i in sorted(data_indices_to_remove, reverse=True):
        self.data = self.data[:i] + self.data[i + 1:]

    # CGC-specific data filtering
    self.data = [d for d in self.data if d.section_name not in section_names_to_ignore]

    # restore capstone X86 syntax at the end
    if self.project.arch.capstone_x86_syntax != old_capstone_syntax:
        self.project.arch.capstone_x86_syntax = old_capstone_syntax
        self.project.factory.default_engine.clear_cache()

    l.debug('Initialized.')
|
def make_container_tree(folders, files, path_delim="/", parse_files=True):
    """Convert flat folder/file listings into a JSON-style graph.

    :param folders: list of folder paths in the image
    :param files: list of file paths in those folders
    :param path_delim: the path delimiter, default is '/'
    :param parse_files: when truthy, include a 'files' lookup in the result
        mapping each folder node id to the file names directly inside it
    :return: dict with keys 'graph' (node tree rooted at a synthetic "base"
        node), 'lookup' (full path -> node id), 'depth', and optionally
        'files'
    """
    nodes = {}       # node id -> node dict
    lookup = {}      # full path -> node id
    count = 1        # next node id to assign; 0 is reserved for the root
    max_depth = 0
    for folder in folders:
        if folder == ".":
            continue
        folder = re.sub("^[.]/", "", folder)
        path_components = folder.split(path_delim)
        for depth, component in enumerate(path_components):
            fullpath = path_delim.join(path_components[:depth + 1])
            if fullpath in lookup:
                continue
            lookup[fullpath] = count
            node = {"id": count, "name": component, "path": fullpath,
                    "level": depth, "children": []}
            count += 1
            max_depth = max(max_depth, depth)
            if depth == 0:
                # top-level folder: hangs directly off the synthetic root
                node["parent"] = 0
            else:
                node["parent"] = lookup[path_delim.join(path_components[:depth])]
            nodes[node["id"]] = node

    # Attach children to their parents, deepest level first, so every node
    # already carries its own subtree by the time its parent consumes it.
    seen = set()
    for level in range(max_depth, 0, -1):
        children = {node_id: node for node_id, node in nodes.items()
                    if node["level"] == level}
        seen.update(children)
        nodes = {node_id: node for node_id, node in nodes.items()
                 if node_id not in seen}
        for child in children.values():
            # Bug fix: the original special-cased node_id == 0 with a list
            # index assignment that would raise IndexError if ever reached;
            # ids start at 1, so every child simply has a parent in `nodes`.
            nodes[child["parent"]]["children"].append(child)

    # Whatever remains are the level-0 folders; hang them off the root.
    graph = {"name": "base", "children": list(nodes.values())}
    result = {"graph": graph, "lookup": lookup, "depth": max_depth + 1}

    # Associate files with their containing folder node (root files -> id 0).
    if parse_files:
        file_lookup = {}
        for path in files:
            path = re.sub("^[.]/", "", path)
            dirname, filename = os.path.split(path)
            if dirname in lookup:
                file_lookup.setdefault(lookup[dirname], []).append(filename)
            elif dirname == '':
                file_lookup.setdefault(0, []).append(filename)
        result['files'] = file_lookup
    return result
|
def create_expansions(self, environment_id, collection_id, expansions, **kwargs):
    """Create or update the expansion list for a collection.

    Replaces the collection's current expansion list with the uploaded
    content.  At most 500 expanded terms are allowed per collection.

    :param str environment_id: The ID of the environment.
    :param str collection_id: The ID of the collection.
    :param list[Expansion] expansions: Query expansion definitions.  Each
        object expands a term or set of terms into other terms, either
        bidirectionally (only **expanded_terms** given: every term expands to
        every other) or unidirectionally (**input_terms** given as well:
        those terms expand to the **expanded_terms**).
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # All three positional arguments are mandatory.
    for arg_name, arg_value in (('environment_id', environment_id),
                                ('collection_id', collection_id),
                                ('expansions', expansions)):
        if arg_value is None:
            raise ValueError('{} must be provided'.format(arg_name))
    expansions = [self._convert_model(item, Expansion) for item in expansions]
    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('discovery', 'V1', 'create_expansions'))
    params = {'version': self.version}
    data = {'expansions': expansions}
    url = '/v1/environments/{0}/collections/{1}/expansions'.format(
        *self._encode_path_vars(environment_id, collection_id))
    return self.request(method='POST', url=url, headers=headers,
                        params=params, json=data, accept_json=True)
|
async def set_chat_sticker_set(self, chat_id: typing.Union[base.Integer, base.String],
                               sticker_set_name: base.String) -> base.Boolean:
    """Set a new group sticker set for a supergroup.

    The bot must be an administrator in the chat with the appropriate admin
    rights.  Check the optional ``can_set_sticker_set`` field returned by
    getChat to see whether the bot may use this method.

    Source: https://core.telegram.org/bots/api#setchatstickerset

    :param chat_id: Unique identifier for the target chat or username of the target supergroup
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param sticker_set_name: Name of the sticker set to be set as the group sticker set
    :type sticker_set_name: :obj:`base.String`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    # locals() here contains exactly self, chat_id and sticker_set_name.
    payload = generate_payload(**locals())
    return await self.request(api.Methods.SET_CHAT_STICKER_SET, payload)
|
def is_info_germline(rec):
    """Check if a variant record is germline based on INFO attributes.

    Works with VarDict's annotation of STATUS (either ``rec.INFO`` or
    ``rec.info`` depending on the record implementation).
    """
    info = rec.INFO if hasattr(rec, "INFO") else rec.info
    status = info.get("STATUS", "").lower()
    return status == "germline" or "loh" in status
|
def populate_unpaired_line(d_vals, f_f_header, missing_val=None):
    """Build a placeholder line for keys with no match in the other file.

    :param d_vals: dict of key -> list of field lists from the other file;
        only used to count columns when no header is available.
    :param f_f_header: field names of the other file, or None if headerless.
    :param missing_val: value used to fill every field; required.
    :return: a single-element list holding either a dict (header present) or
        a list (headerless) of missing values.
    :raises MissingValueError: if *missing_val* is None.
    """
    if missing_val is None:
        raise MissingValueError("Need missing value to output " + "unpaired lines")
    if f_f_header is not None:
        return [dict(zip(f_f_header, [missing_val] * len(f_f_header)))]
    assert (len(d_vals) > 0)
    # Bug fix: dict views are not indexable on Python 3, so d_vals.keys()[0]
    # raised TypeError; take an arbitrary first key via next(iter(...)).
    first_key = next(iter(d_vals))
    f_f_num_cols = len(d_vals[first_key][0])
    return [[missing_val] * f_f_num_cols]
|
def apply_rule(self, pattern, replacement, recursive=True):
    """Apply a single rewriting rule to the expression.

    Shorthand for :meth:`apply_rules` with ``rules=[(pattern, replacement)]``.

    Args:
        pattern (.Pattern): A pattern containing one or more wildcards.
        replacement (callable): A callable that takes the wildcard names in
            `pattern` as keyword arguments and returns a replacement for any
            expression that `pattern` matches.
        recursive (bool): Whether to also apply the rule inside
            sub-expressions.

    Example:
        Replacing operator symbols with scalars turns a Heisenberg
        Hamiltonian into its classical counterpart::

            >>> H_classical = H.apply_rule(
            ...     pattern(OperatorSymbol, wc('label', head=StrLabel)),
            ...     lambda label: label.expr * IdentityOperator)
    """
    single_rule = [(pattern, replacement)]
    return self.apply_rules(single_rule, recursive=recursive)
|
def clear_provider_links(self):
    """Removes the provider chain, restoring the default value.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
           ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for osid.learning.ActivityForm.clear_assets_template
    metadata = self.get_provider_links_metadata()
    if metadata.is_read_only() or metadata.is_required():
        raise errors.NoAccess()
    self._my_map['providerLinkIds'] = self._provider_links_default
|
def total_deformation(u, v, dx, dy):
    r"""Calculate the horizontal total deformation of the horizontal wind.

    Parameters
    ----------
    u : (M, N) ndarray
        x component of the wind
    v : (M, N) ndarray
        y component of the wind
    dx : float or ndarray
        Grid spacing(s) in the x-direction.  If an array, there should be one
        item less than the size of `u` along the applicable axis.
    dy : float or ndarray
        Grid spacing(s) in the y-direction.  If an array, there should be one
        item less than the size of `u` along the applicable axis.

    Returns
    -------
    (M, N) ndarray
        Total deformation

    See Also
    --------
    shearing_deformation, stretching_deformation

    Notes
    -----
    Inputs with more than two dimensions are assumed to have either leading
    dimensions of (x, y) or trailing dimensions of (y, x), depending on the
    value of ``dim_order``.
    """
    dudy, dudx = gradient(u, deltas=(dy, dx), axes=(-2, -1))
    dvdy, dvdx = gradient(v, deltas=(dy, dx), axes=(-2, -1))
    shear = dvdx + dudy
    stretch = dudx - dvdy
    return np.sqrt(shear ** 2 + stretch ** 2)
|
def check_mailfy(self, query, kwargs={}):
    """Verify whether an email address is registered on InfoJobs.

    May be redefined by subclasses of Platform; the only requirement is that
    the return contract below is honored.

    Args:
        query: The email address to be searched.
        kwargs: Dictionary with extra parameters. Just in case.

    Return:
        The raw response text when the address is registered, otherwise None.
    """
    import requests
    session = requests.Session()
    # The initial GET primes the session cookies (csrf token) for the check.
    session.get('https://www.infojobs.net')
    response = session.post(
        'https://www.infojobs.net/candidate/profile/check-email-registered.xhtml',
        data={"email": query},
    )
    if '{"email_is_secure":true,"email":true}' in response.text:
        return response.text
    return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.