signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def email(self, subject, text_body, html_body=None, sender=None, **kwargs):
    # type: (str, str, Optional[str], Optional[str], Any) -> None
    """Send an email to this user via the configured emailer.

    Args:
        subject (str): Email subject.
        text_body (str): Plain-text email body.
        html_body (Optional[str]): HTML email body.
        sender (Optional[str]): Email sender; defaults to the SMTP username.
        **kwargs: Forwarded to the emailer, e.g. ``mail_options`` /
            ``rcpt_options`` (see the smtplib documentation).

    Returns:
        None
    """
    recipient = self.data['email']
    mailer = self.configuration.emailer()
    mailer.send([recipient], subject, text_body,
                html_body=html_body, sender=sender, **kwargs)
def ko_json(queryset, field_names=None, name=None, safe=False):
    """Serialize a QuerySet to JSON based on its ``knockout_fields``.

    Thin convenience wrapper around ``ko_data`` with ``return_json=True``;
    useful for middleware / APIs.
    """
    options = (field_names, name, safe)
    return ko_data(queryset, *options, return_json=True)
def _setup_model_loss(self, lr):
    """Initialize the loss and optimizer for the PyTorch model.

    Pre-existing ``self.loss`` / ``self.optimizer`` attributes are left
    untouched so callers can configure either one ahead of time.

    Args:
        lr: Learning rate for the Adam optimizer.
    """
    # Only create defaults for whatever is missing.
    if not hasattr(self, "loss"):
        self.loss = SoftCrossEntropyLoss()
    if not hasattr(self, "optimizer"):
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
def delete_multiple(self, ids=None, messages=None):
    """Issue an HTTP request deleting messages from the queue.

    Arguments:
        ids -- A list of message ids to be deleted from the queue.
        messages -- Response to message reserving.
    """
    if ids is None and messages is None:
        raise Exception('Please, specify at least one parameter.')
    items = None
    if ids is not None:
        items = [{'id': msg_id} for msg_id in ids]
    if messages is not None:
        # Reservation responses carry both the id and the reservation token.
        items = [{'id': msg['id'], 'reservation_id': msg['reservation_id']}
                 for msg in messages['messages']]
    payload = json.dumps({'ids': items})
    endpoint = "queues/%s/messages" % self.name
    result = self.client.delete(url=endpoint, body=payload,
                                headers={'Content-Type': 'application/json'})
    return result['body']
def ancestors(self):
    """Collect this element's ancestors, nearest first.

    @return: A list of ancestors.
    @rtype: [L{Element}, ..]
    """
    chain = []
    node = self.parent
    while node is not None:
        chain.append(node)
        node = node.parent
    return chain
def udf(f=None, returnType=StringType()):
    """Creates a user defined function (UDF).

    .. note:: User-defined functions are considered deterministic by default;
        due to optimization, duplicate invocations may be eliminated or the
        function may be invoked more times than it appears in the query. Call
        ``asNondeterministic`` on the returned UDF if yours is not
        deterministic.
    .. note:: User-defined functions do not support conditional expressions or
        short circuiting in boolean expressions (everything is evaluated), and
        do not take keyword arguments on the calling side.

    :param f: python function if used as a standalone function
    :param returnType: the return type of the user-defined function. The value
        can be either a :class:`pyspark.sql.types.DataType` object or
        a DDL-formatted type string.

    >>> from pyspark.sql.types import IntegerType
    >>> slen = udf(lambda s: len(s), IntegerType())
    >>> @udf(returnType=IntegerType())
    ... def add_one(x):
    ...     if x is not None:
    ...         return x + 1
    """
    # The full Python-value -> SQL-type conversion table for normal UDFs is
    # maintained in SPARK-25666's PR; see that change for how it is generated.
    # Handles the decorator forms @udf, @udf(), and @udf(dataType()).
    if f is None or isinstance(f, (str, DataType)):
        # A DataType (or DDL string) passed positionally in decorator use
        # acts as the returnType.
        return_type = f or returnType
        return functools.partial(_create_udf, returnType=return_type,
                                 evalType=PythonEvalType.SQL_BATCHED_UDF)
    return _create_udf(f=f, returnType=returnType,
                       evalType=PythonEvalType.SQL_BATCHED_UDF)
def observe_command(command, **kwargs):
    """Execute the given command and capture its output without writing to
    the console.

    :param str | list command: Command to run; strings are shlex-split.
    :kwargs:
        * `shell` (``bool`` = False) --
        * `timeout` (``int`` = 15) -- Timeout in seconds (Python >= 3.3 only)
        * `stdin` (``*`` = subprocess.PIPE) --
        * `stdout` (``*`` = subprocess.PIPE) --
        * `stderr` (``*`` = subprocess.PIPE) --
        * `cwd` (``string`` = None) --
    :return: CommandReturnValue
    """
    # BUG FIX: ``timeout`` belongs to communicate(), not Popen(). It must be
    # *popped* from kwargs, otherwise Popen(command, **kwargs) raises
    # TypeError whenever a caller supplies it.
    timeout = kwargs.pop('timeout', 15)
    kwargs.setdefault('shell', False)
    kwargs.setdefault('stdin', subprocess.PIPE)
    kwargs.setdefault('stdout', subprocess.PIPE)
    kwargs.setdefault('stderr', subprocess.PIPE)
    kwargs.setdefault('cwd', None)
    if not isinstance(command, list):
        command = shlex.split(command)
    # TODO: implement and process stdin
    proc = subprocess.Popen(command, **kwargs)
    try:
        # Only Python >= 3.3 supports the 'timeout' argument. Tuple
        # comparison is used instead of checking major/minor separately,
        # which mis-classified e.g. any future X.0 release.
        if sys.version_info >= (3, 3):
            proc_stdout, proc_stderr = proc.communicate(timeout=timeout)
        else:
            proc_stdout, proc_stderr = proc.communicate()
    except subprocess.TimeoutExpired:
        proc.kill()
        proc_stdout, proc_stderr = proc.communicate()

    def _decode(raw):
        # Normalize missing/empty/undecodable output to None, mirroring the
        # original "falsy becomes None" behavior. A stream that was not
        # PIPEd comes back as None from communicate().
        if raw is None:
            return None
        try:
            text = raw.decode('utf-8')
        except (IOError, UnicodeDecodeError):
            return None
        return text or None

    return CommandReturnValue(return_value=proc.returncode,
                              stdout=_decode(proc_stdout),
                              stderr=_decode(proc_stderr))
def plot(self):
    """The plot object, see pyplot.

    Created lazily via ``figure.add_subplot`` on first access and cached in
    ``self._plot``: the fit data is drawn (with error bars when errors are
    present), the fitted curve is overlaid, and axes are labelled from the
    fit metadata.
    """
    if hasattr(self, '_plot'):
        return self._plot
    axes = self.figure.add_subplot(111)
    pointspace = self.fit.pointspace(num=self.options['points'])
    errors = self.fit.data.error
    if any(err is not None for err in errors):
        draw_data = axes.errorbar
        self.options['data']['xerr'] = errors[0]
        self.options['data']['yerr'] = errors[1]
    else:
        draw_data = axes.plot
    draw_data(*pointspace['data'], **self.options['data'])
    axes.plot(*pointspace['fit'], **self.options['fit'])
    labels = {}
    for axis in ('independent', 'dependent'):
        meta = getattr(self.fit, axis)
        name = meta['name'] if 'name' in meta else ''
        units = ' (' + meta['units'] + ')' if 'units' in meta else ''
        labels[axis] = name + units
    axes.set_xlabel(labels['independent'])
    axes.set_ylabel(labels['dependent'])
    self._plot = axes
    return self._plot
def map2rpn(map, obj):
    """Convert a MongoDB-like dictionary into an RPN list of operands and
    operators.

    In Reverse Polish Notation (RPN) every operator follows all of its
    operands, e.g. 3 - 4 + 5  -->  3 4 - 5 +

    >>> d = {2.0: {'$eq': 1.0}}
    >>> assert map2rpn(d, None) == [2.0, 1.0, '$eq']
    """
    rpn = []
    for key, value in map.items():
        if key in _ALL_OPS:
            if isinstance(value, collections.abc.Mapping):
                # e.g. "$not": {"$gt": "one"}
                rpn.extend(map2rpn(value, obj))
                rpn.append(key)
            elif isinstance(value, (list, tuple)):
                # e.g. "$and": [{"$not": {"one": 1.0}}, {"two": {"$lt": 3}}]
                for sub_map in value:
                    rpn.extend(map2rpn(sub_map, obj))
                rpn.append(key)
            else:
                # Either "$eq": "attribute_name" or "$eq": 1.0; a non-string
                # operand makes getattr raise TypeError, so treat it as a
                # literal value in that case.
                try:
                    rpn.extend([getattr(obj, value), key])
                except TypeError:
                    rpn.extend([value, key])
        else:
            # Keys that name an attribute of obj are resolved; anything else
            # (non-string keys raise TypeError) is used verbatim.
            try:
                key = getattr(obj, key)
            except TypeError:
                pass
            if isinstance(value, collections.abc.Mapping):
                # "one": {"$eq": 1.0}
                rpn.append(key)
                rpn.extend(map2rpn(value, obj))
            else:
                # "one": 1.0 -- implicit equality
                rpn.extend([key, value, "$eq"])
    return rpn
def wiftohex(wifkey):
    '''Decode a WIF private key.

    Returns a tuple of:
    (64-char hex key, 2-char hex prefix for key, if it was compressed)
    '''
    compressed = False
    wifkey = normalize_input(wifkey)
    assert len(wifkey) in (50, 51, 52)
    for ch in wifkey:
        if ch not in b58_digits:
            raise Exception("Not WIF")
    decoded = b58d(wifkey)
    prefix, hexkey = decoded[:2], decoded[2:]
    if len(hexkey) == 66:
        # A trailing '01' marks a key for a compressed public key.
        assert hexkey[-2:] == '01'
        hexkey = hexkey[:-2]
        compressed = True
    assert len(hexkey) == 64
    return hexkey, prefix, compressed
def daemon_status(self):
    """Display the load status of a service.

    :rtype: dict
    """
    # Avoid shadowing the json module name used elsewhere in the file.
    endpoint = self._build_url(constants.DAEMON_STATUS_ENDPOINT)
    return self.client.get(endpoint, timeout=self.timeout)
def send(self, subname, value, timestamp=None):
    '''Send the data to statsd via self.connection.

    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    :keyword value: The raw value to send
    :keyword timestamp: Epoch seconds to report; defaults to "now".
    '''
    if timestamp is None:
        # strftime("%s") is a non-portable glibc extension (absent on
        # Windows/BSD libc); timestamp() yields the same local-time epoch
        # seconds portably.
        ts = int(dt.datetime.now().timestamp())
    else:
        ts = timestamp
    name = self._get_name(self.name, subname)
    # Lazy %-args so formatting is skipped when INFO is disabled.
    self.logger.info('%s: %s %s', name, value, ts)
    return statsd.Client._send(self, {name: '%s|r|%s' % (value, ts)})
def add_constant(self, stream, value):
    """Store a constant value for use in this sensor graph.

    Constant assignments occur after all sensor graph nodes have been
    allocated since they must be propagated to all appropriate virtual
    stream walkers.

    Args:
        stream (DataStream): The constant stream to assign the value to.
        value (int): The value to assign.

    Raises:
        ArgumentError: If the stream already has a constant assigned.
    """
    if stream in self.constant_database:
        raise ArgumentError("Attempted to set the same constant twice",
                            stream=stream,
                            old_value=self.constant_database[stream],
                            new_value=value)
    self.constant_database[stream] = value
def __print_namespace_help(self, session, namespace, cmd_name=None):
    """Print the documentation of all the commands in the given name space,
    or only of the given command.

    :param session: Session Handler
    :param namespace: Name space of the command
    :param cmd_name: Name of the command to show, None to show them all
    """
    session.write_line("=== Name space '{0}' ===", namespace)
    # Either every command of the name space (sorted) or just the one asked.
    if cmd_name is None:
        names = sorted(self._commands[namespace])
    else:
        names = [cmd_name]
    for index, command in enumerate(names):
        if index:
            # Separate consecutive help entries with an empty line
            session.write_line("\n")
        self.__print_command_help(session, namespace, command)
def update_release(self, release, project, release_id):
    """UpdateRelease.

    [Preview API] Update a complete release object.

    :param :class:`<Release> <azure.devops.v5_1.release.models.Release>` release: Release object for update.
    :param str project: Project ID or project name
    :param int release_id: Id of the release to update.
    :rtype: :class:`<Release> <azure.devops.v5_1.release.models.Release>`
    """
    # Build URL route values only for parameters that were supplied.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url(
            'project', project, 'str')
    if release_id is not None:
        route_values['releaseId'] = self._serialize.url(
            'release_id', release_id, 'int')
    content = self._serialize.body(release, 'Release')
    response = self._send(
        http_method='PUT',
        location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
        version='5.1-preview.8',
        route_values=route_values,
        content=content,
    )
    return self._deserialize('Release', response)
def _default(self, obj: object):
    """Return a serializable version of obj. Overrides JsonObj _default
    method.

    :param obj: Object to be serialized
    :return: Serialized version of obj
    """
    # Same dispatch as the original single-expression ternary chain,
    # spelled out branch by branch for readability.
    if obj is JSGNull:
        return None
    if type(obj) is AnyType:
        return obj.val
    if isinstance(obj, JsonObj):
        return JSGObject._strip_nones(obj.__dict__)
    if issubclass(type(obj), JSGString):
        return cast(JSGString, obj).val
    return str(obj)
def check_call_out(command):
    """Run the given command (with shell=False) and return the output,
    stripped of enclosing whitespace.

    :param command: Argument list for the external process.
    :return: The process's stdout (bytes), stripped.
    :raises GitInvocationError: If the return code is non-zero.
    """
    # Start external command process, capturing both streams.
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, _ = p.communicate()
    if p.returncode != 0:
        # BUG FIX: the original used Python 2's `raise E, msg` form, which
        # is a SyntaxError on Python 3; use the call form instead.
        raise GitInvocationError('failed to run "%s"' % " ".join(command))
    return out.strip()
def add_priorfactor(self, **kwargs):
    """Add the given values to ``priorfactors``.

    If a given keyword already exists, an error is logged and the existing
    value is kept; use :func:`EclipsePopulation.change_prior` instead.
    """
    for key, val in kwargs.items():
        if key in self.priorfactors:
            logging.error('%s already in prior factors for %s. '
                          'use change_prior function instead.'
                          % (key, self.model))
            continue
        self.priorfactors[key] = val
        logging.info('%s added to prior factors for %s' % (key, self.model))
def update(self, ignore_warnings=None):
    """Update this app_profile.

    .. note::
        Update any or all of the following values:
        ``routing_policy_type``
        ``description``
        ``cluster_id``
        ``allow_transactional_writes``
    """
    update_mask_pb = field_mask_pb2.FieldMask()
    if self.description is not None:
        update_mask_pb.paths.append("description")
    # Exactly one routing path is included, chosen by the policy type.
    routing_path = ("multi_cluster_routing_use_any"
                    if self.routing_policy_type == RoutingPolicyType.ANY
                    else "single_cluster_routing")
    update_mask_pb.paths.append(routing_path)
    return self.instance_admin_client.update_app_profile(
        app_profile=self._to_pb(),
        update_mask=update_mask_pb,
        ignore_warnings=ignore_warnings,
    )
def parse_statement(self):
    """Parse a single statement.

    Dispatches on the current 'name' token: built-in statement keywords go
    to their ``parse_<name>`` method, ``call`` / ``filter`` blocks get
    dedicated handlers, and remaining names are looked up in the extension
    registry. Unknown tags raise via ``fail_unknown_tag``.
    """
    token = self.stream.current
    if token.type != 'name':
        self.fail('tag name expected', token.lineno)
    # Speculatively push the tag so nested parsing sees the enclosing tag;
    # popped again in the finally block below.
    self._tag_stack.append(token.value)
    pop_tag = True
    try:
        if token.value in _statement_keywords:
            return getattr(self, 'parse_' + self.stream.current.value)()
        if token.value == 'call':
            return self.parse_call_block()
        if token.value == 'filter':
            return self.parse_filter_block()
        ext = self.extensions.get(token.value)
        if ext is not None:
            return ext(self)
        # did not work out, remove the token we pushed by accident
        # from the stack so that the unknown tag fail function can
        # produce a proper error message.
        self._tag_stack.pop()
        pop_tag = False
        self.fail_unknown_tag(token.value, token.lineno)
    finally:
        if pop_tag:
            self._tag_stack.pop()
def read(self, timeout=None):
    '''Read from the transport. If no data is available, should return None.
    If timeout > 0, will only block for `timeout` seconds.
    '''
    # If currently locked, another greenlet is trying to read, so yield
    # control and then return none. Required if a Connection is configured
    # to be synchronous, a sync callback is trying to read, and there's
    # another read loop running read_frames. Without it, the run loop will
    # release the lock but then immediately acquire it again. Yielding
    # control in the reading thread after bytes are read won't fix
    # anything, because it's quite possible the bytes read resulted in a
    # frame that satisfied the synchronous callback, and so this needs to
    # return immediately to first check the current status of synchronous
    # callbacks before attempting to read again.
    if self._read_lock.locked():
        self._read_wait.wait(timeout)
        return None
    self._read_lock.acquire()
    try:
        return super(GeventTransport, self).read(timeout=timeout)
    finally:
        self._read_lock.release()
        # Wake every greenlet parked on _read_wait in the branch above,
        # then immediately reset the event for the next read cycle.
        self._read_wait.set()
        self._read_wait.clear()
def get_marked_up_list(self, tokensource):
    """An updated version of pygments.formatter.format_unencoded.

    Rather than writing each piece to an outfile, evaluates the generator
    chain and returns a list containing only the source-code entries.
    """
    source = self._format_lines(tokensource)
    if self.hl_lines:
        source = self._highlight_lines(source)
    if not self.nowrap:
        if self.linenos == 2:
            source = self._wrap_inlinelinenos(source)
        if self.lineanchors:
            source = self._wrap_lineanchors(source)
        if self.linespans:
            source = self._wrap_linespans(source)
        if self.linenos == 1:
            source = self._wrap_tablelinenos(source)
    # Each generated item is (is_code_flag, html_value); keep the html of
    # entries flagged as code (flag == 1).
    wrapped = self._wrap_div(self._wrap_pre(source))
    return [item[1] for item in wrapped if item[0] == 1]
def _make_login(self):
    """Private method that performs the login request and stores the
    resulting session info."""
    session_id = self._parse_session_id(self._session_info)
    login_url = ('{}data/v1/session/login?user={}&pass={}&sid={}'
                 .format(self.base_url, self.username, self.password,
                         session_id))
    self._session_info = get_json(login_url)
def branches_containing(commit):
    """Return a list of branches containing that commit."""
    output = run('branch --contains %s' % commit)
    # Drop the leading "* " marker git puts on the current branch.
    return [line.lstrip('* ') for line in output.splitlines()]
def invitation_accepted(event):
    """When an invitation is accepted, add the user to the team.

    Looks the accepted token up in invitation storage, logs the user in,
    and adds them to the workspace team unless they are already a member.
    """
    request = getRequest()
    storage = get_storage()
    if event.token_id not in storage:
        return
    ws_uid, username = storage[event.token_id]
    # NOTE(review): this bare lookup discards its result -- it looks like
    # `del storage[event.token_id]` (consuming the one-time token) was
    # intended; confirm before changing.
    storage[event.token_id]
    acl_users = api.portal.get_tool('acl_users')
    acl_users.updateCredentials(request, request.response, username, None)
    catalog = api.portal.get_tool(name="portal_catalog")
    brain = catalog.unrestrictedSearchResults(UID=ws_uid)[0]
    with api.env.adopt_roles(["Manager"]):
        ws = IWorkspace(brain.getObject())
        # for/else: the else branch runs only when no existing member
        # matched (i.e. the loop completed without break).
        for name in ws.members:
            member = api.user.get(username=name)
            if member is not None:
                if member.getUserName() == username:
                    api.portal.show_message(
                        _('Oh boy, oh boy, you are already a member'),
                        request, )
                    break
        else:
            ws.add_to_team(user=username)
            api.portal.show_message(
                _('Welcome to our family, Stranger'),
                request, )
def current(cls, service, port):
    """Return a Node instance representing the current service node.

    Collects the host and IP information for the current machine and
    combines it with the given port and the service's metadata.
    """
    hostname = socket.getfqdn()
    address = socket.gethostbyname(hostname)
    return cls(host=hostname, ip=address, port=port,
               metadata=service.metadata)
def absent(name, region=None, key=None, keyid=None, profile=None):
    '''Ensure a pipeline with the service_name does not exist.

    name
        Name of the service to ensure a data pipeline does not exist for.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    result_pipeline_id = __salt__['boto_datapipeline.pipeline_id_from_name'](
        name, region=region, key=key, keyid=keyid, profile=profile,
    )
    # Guard clause: a lookup error means the pipeline is already absent.
    if 'error' in result_pipeline_id:
        ret['comment'] = 'AWS data pipeline {0} absent.'.format(name)
        return ret
    pipeline_id = result_pipeline_id['result']
    if __opts__['test']:
        ret['comment'] = 'Data pipeline {0} set to be deleted.'.format(name)
        ret['result'] = None
        return ret
    __salt__['boto_datapipeline.delete_pipeline'](
        pipeline_id, region=region, key=key, keyid=keyid, profile=profile,
    )
    ret['changes']['old'] = {'pipeline_id': pipeline_id}
    ret['changes']['new'] = None
    return ret
def super_lm_moe():
    """Add mixture of experts with ~1B params."""
    hparams = super_lm_base()
    # Alternate attention and MoE blocks, finishing with a plain ffn layer.
    block = "n,att,m,d,a," "n,moe,m,d,a,"
    hparams.layers = block * 4 + "n,ffn,d"
    hparams.moe_num_experts = 32
    hparams.moe_hidden_sizes = "1024"
    return hparams
def write_oplog_progress(self):
    """Write oplog progress to the checkpoint file provided by the user.

    The current checkpoint is renamed to a ``.backup`` first so a failed
    write can be restored, then the per-thread oplog timestamps are
    serialized as JSON.
    """
    if self.oplog_checkpoint is None:
        return None
    with self.oplog_progress as oplog_prog:
        oplog_dict = oplog_prog.get_dict()
        items = [[name, util.bson_ts_to_long(oplog_dict[name])]
                 for name in oplog_dict]
        if not items:
            return
        # write to temp file
        backup_file = self.oplog_checkpoint + ".backup"
        os.rename(self.oplog_checkpoint, backup_file)
        # for each of the threads write to file
        with open(self.oplog_checkpoint, "w") as dest:
            if len(items) == 1:
                # Write 1-dimensional array, as in previous versions.
                json_str = json.dumps(items[0])
            else:
                # Write a 2d array to support sharded clusters.
                json_str = json.dumps(items)
            try:
                dest.write(json_str)
            except IOError:
                # Basically wipe the file, copy from backup.
                dest.truncate()
                with open(backup_file, "r") as backup:
                    # BUG FIX: shutil.copyfile() expects *paths* and would
                    # raise TypeError on open file objects; copyfileobj()
                    # is the correct call here.
                    shutil.copyfileobj(backup, dest)
        os.remove(backup_file)
def serialize(self, raw=False):
    '''Encode the private part of the key: base64 format by default, hex
    encoded bytes when raw is True.

    @return: bytes
    '''
    if raw:
        return self._key.encode()
    return self._key.encode(nacl.encoding.Base64Encoder)
def split_tokens(self):
    """Split the line string into whitespace-delimited tokens (lazy)."""
    if self._split_tokens_calculated:
        return self._split_tokens
    # Cache the result so repeated calls do not re-split the line.
    self._split_tokens = self._line_str.split()
    self._split_tokens_calculated = True
    return self._split_tokens
def _find_from_file(full_doc, from_file_keyword):
    """Find a line in ``full_doc`` shaped like

        <from_file_keyword> <colon> <path>

    and return the path, or None when no such line exists.
    """
    for line in full_doc.splitlines():
        if from_file_keyword not in line:
            continue
        parts = line.strip().split(':')
        if len(parts) == 2 and parts[0].strip() == from_file_keyword:
            # First matching line wins, as before.
            return parts[1].strip()
    return None
def _local_coverage ( reader , features , read_strand = None , fragment_size = None , shift_width = 0 , bins = None , use_score = False , accumulate = True , preserve_total = False , method = None , function = "mean" , zero_inf = True , zero_nan = True , processes = None , stranded = True , verbose = False ) :
"""Returns a binned vector of coverage .
Computes a 1D vector of coverage at the coordinates for each feature in
` features ` , extending each read by ` fragmentsize ` bp .
Some arguments cannot be used for bigWig files due to the structure of
these files . The parameters docstring below indicates whether or not an
argument can be used with bigWig files .
Depending on the arguments provided , this method can return a vector
containing values from a single feature or from concatenated features .
An example of the flexibility afforded by the latter case :
` features ` can be a 3 - tuple of pybedtools . Intervals representing ( TSS
+ 1kb upstream , gene , TTS + 1kb downstream ) and ` bins ` can be [ 100,
1000 , 100 ] . This will return a vector of length 1200 containing the
three genomic intervals binned into 100 , 1000 , and 100 bins
respectively . Note that is up to the caller to construct the right
axes labels in the final plot !
Parameters
features : str , interval - like object , or list
Can be a single interval or an iterable yielding intervals .
Interval - like objects must have chrom , start , and stop attributes , and
optionally a strand attribute . One exception to this that if
` features ` is a single string , it can be of the form " chrom : start - stop "
or " chrom : start - stop [ strand ] " .
If ` features ` is a single interval , then return a 1 - D array for that
interval .
If ` features ` is an iterable of intervals , then return a 1 - D
array that is a concatenation of signal for these intervals .
Available for bigWig .
bins : None , int , list
If ` bins ` is None , then each value in the returned array will
correspond to one bp in the genome .
If ` features ` is a single Interval , then ` bins ` is an integer or None .
If ` features ` is an iterable of Intervals , ` bins ` is an iterable of
integers of the same length as ` features ` .
Available for bigWig .
fragment _ size : None or int
If not None , then each item from the genomic signal ( e . g . , reads from
a BAM file ) will be extended ` fragment _ size ` bp in the 3 ' direction .
Higher fragment sizes will result in smoother signal . Not available
for bigWig .
shift _ width : int
Each item from the genomic signal ( e . g . , reads from a BAM
file ) will be shifted ` shift _ width ` bp in the 3 ' direction . This can
be useful for reconstructing a ChIP - seq profile , using the shift width
determined from the peak - caller ( e . g . , modeled ` d ` in MACS ) . Not
available for bigWig .
read _ strand : None or str
If ` read _ strand ` is one of " + " or " - " , then only items from the genomic
signal ( e . g . , reads from a BAM file ) on that strand will be considered
and reads on the opposite strand ignored . Useful for plotting genomic
signal for stranded libraries . Not available for bigWig .
stranded : bool
If True , then the profile will be reversed for features whose strand
attribute is " - " .
use _ score : bool
If True , then each bin will contain the sum of the * score * attribute of
genomic features in that bin instead of the * number * of genomic
features falling within each bin . Not available for bigWig .
accumulate : bool
If False , then only record * that * there was something there , rather
than acumulating reads . This is useful for making matrices with called
peaks . Available for bigWig .
preserve _ total : bool
If True , re - scales the returned value so that each binned row ' s total
is equal to the sum of the original , un - binned data . The units of the
returned array will be in " total per bin " . This is useful for , e . g . ,
counting reads in features . If ` preserve _ total ` is False , then the
returned array will have units of " density " ; this is more generally
useful and is the default behavior . Available for bigWig , but not when
using method = " ucsc _ summarize " .
method : str ; one of [ " summarize " | " get _ as _ array " | " ucsc _ summarize " ]
Only used for bigWig . The method specifies how data are extracted from
the bigWig file . " summarize " is the default . It ' s quite fast , but may
yield slightly different results when compared to running this same
function on the BAM file from which the bigWig was created .
" summarize " uses bx - python . The values returned will not be exactly
the same as the values returned when local _ coverage is called on a BAM ,
BED , or bigBed file , but they will be close . This method is quite
fast , and is the default when bins is not None .
" get _ as _ array " uses bx - python , but does a separate binning step . This
can be slower than the other two methods , but the results are exactly
the same as those from a BAM , BED , or bigBed file . This method is
always used if bins = None .
" ucsc _ summarize " is an alternative version of " summarize " . It uses the
UCSC program `bigWigSummary`, which must already be installed and on your
path .
function : str ; one of [ ' sum ' | ' mean ' | ' min ' | ' max ' | ' std ' ]
Determine the nature of the values returned . Only valid if ` method ` is
" summarize " or " ucsc _ summarize " , which also implies bigWig . Default is
" mean " . If ` method = " ucsc _ summarize " , then there is an additional option
for function , " coverage " , which returns the percent of region that is
covered .
zero _ inf , zero _ nan : bool
Only used for bigWig . If either are True , sets any missing or inf
values to zero before returning .
If `method="ucsc_summarize"`, missing values are always reported as
zero . If ` method = " get _ as _ array " ` , missing values always reported as
nan .
Values can be - inf , inf , or nan for missing values when
` method = " summarize " ` according to the following table :
` function ` missing values appear as
" sum " 0
" mean " nan
" min " inf
" max " - inf
" std " nan
processes : int or None
The feature can be split across multiple processes .
Returns
1 - d NumPy array
Notes
If a feature has a " - " strand attribute , then the resulting profile will be
* relative to a minus - strand feature * . That is , the resulting profile will
be reversed .
Returns arrays ` x ` and ` y ` . ` x ` is in genomic coordinates , and ` y ` is
the coverage at each of those coordinates after extending fragments .
The total number of reads is guaranteed to be the same no matter how it ' s
binned .
( with ideas from
http : / / www - huber . embl . de / users / anders / HTSeq / doc / tss . html )""" | # bigWig files are handled differently , so we need to know if we ' re working
# with one ; raise exeception if a kwarg was supplied that ' s not supported .
if isinstance ( reader , filetype_adapters . BigWigAdapter ) :
is_bigwig = True
defaults = ( ( 'read_strand' , read_strand , None ) , ( 'fragment_size' , fragment_size , None ) , ( 'shift_width' , shift_width , 0 ) , ( 'use_score' , use_score , False ) , ( 'preserve_total' , preserve_total , False ) , )
for name , check , default in defaults :
if ( ( ( default is None ) and ( check is not default ) ) or ( check != default ) ) :
raise ArgumentError ( "Argument '%s' not supported for bigWig" % name )
if method == 'ucsc_summarize' :
if preserve_total :
raise ArgumentError ( "preserve_total=True not supported when using " "method='ucsc_summarize'" )
else :
is_bigwig = False
if isinstance ( reader , filetype_adapters . BamAdapter ) :
if use_score :
raise ArgumentError ( "Argument 'use_score' not supported for " "bam" )
# e . g . , features = " chr1:1-1000"
if isinstance ( features , basestring ) :
features = helpers . tointerval ( features )
if not ( ( isinstance ( features , list ) or isinstance ( features , tuple ) ) ) :
if bins is not None :
if not isinstance ( bins , int ) :
raise ArgumentError ( "bins must be an int, got %s" % type ( bins ) )
features = [ features ]
bins = [ bins ]
else :
if bins is None :
bins = [ None for i in features ]
if not len ( bins ) == len ( features ) :
raise ArgumentError ( "bins must have same length as feature list" )
# nomenclature :
# " window " is region we ' re getting data for
# " alignment " is one item in that region
profiles = [ ]
xs = [ ]
for window , nbin in zip ( features , bins ) :
window = helpers . tointerval ( window )
chrom = window . chrom
start = window . start
stop = window . stop
strand = window . strand
if not is_bigwig : # Extend the window to catch reads that would extend into the
# requested window
_fs = fragment_size or 0
padded_window = pybedtools . Interval ( chrom , max ( start - _fs - shift_width , 0 ) , stop + _fs + shift_width , )
window_size = stop - start
# start off with an array of zeros to represent the window
profile = np . zeros ( window_size , dtype = float )
for interval in reader [ padded_window ] :
if read_strand :
if interval . strand != read_strand :
continue
# Shift interval by modeled distance , if specified .
if shift_width :
if interval . strand == '-' :
interval . start -= shift_width
interval . stop -= shift_width
else :
interval . start += shift_width
interval . stop += shift_width
# Extend fragment size from 3'
if fragment_size :
if interval . strand == '-' :
interval . start = interval . stop - fragment_size
else :
interval . stop = interval . start + fragment_size
# Convert to 0 - based coords that can be used as indices into
# array
start_ind = interval . start - start
# If the feature goes out of the window , then only include the
# part that ' s inside the window
start_ind = max ( start_ind , 0 )
# Same thing for stop
stop_ind = interval . stop - start
stop_ind = min ( stop_ind , window_size )
# Skip if the feature is shifted outside the window . This can
# happen with large values of ` shift _ width ` .
if start_ind >= window_size or stop_ind < 0 :
continue
# Finally , increment profile
if use_score :
score = float ( interval . score )
else :
score = 1
if accumulate :
if preserve_total :
profile [ start_ind : stop_ind ] += ( score / float ( ( stop_ind - start_ind ) ) )
else :
profile [ start_ind : stop_ind ] += score
else :
profile [ start_ind : stop_ind ] = score
else : # it ' s a bigWig
profile = reader . summarize ( window , method = method , function = function , bins = ( nbin or len ( window ) ) , zero_inf = zero_inf , zero_nan = zero_nan , )
# If no bins , return genomic coords
if ( nbin is None ) :
x = np . arange ( start , stop )
# Otherwise do the downsampling ; resulting x is stll in genomic
# coords
else :
if preserve_total :
total = float ( profile . sum ( ) )
if not is_bigwig or method == 'get_as_array' :
xi , profile = rebin ( x = np . arange ( start , stop ) , y = profile , nbin = nbin )
if not accumulate :
nonzero = profile != 0
profile [ profile != 0 ] = 1
x = xi
else :
x = np . linspace ( start , stop - 1 , nbin )
# Minus - strand profiles should be flipped left - to - right .
if stranded and strand == '-' :
profile = profile [ : : - 1 ]
xs . append ( x )
if preserve_total :
scale = profile . sum ( ) / total
profile /= scale
profiles . append ( profile )
stacked_xs = np . hstack ( xs )
stacked_profiles = np . hstack ( profiles )
del xs
del profiles
return stacked_xs , stacked_profiles |
def iterShapes(self):
    """Serve up shapes in a shapefile as an iterator.

    Useful for handling large shapefiles: shape records are read lazily
    from the .shp file one at a time rather than loaded into memory all
    at once.
    """
    shp = self.__getFileObj(self.shp)
    # Seek to the end of the file to record its total length, so the
    # read loop below knows when to stop.
    shp.seek(0, 2)
    self.shpLength = shp.tell()
    # Skip the fixed 100-byte shapefile header; records start after it.
    shp.seek(100)
    # Yield one parsed shape record at a time until end of file.
    while shp.tell() < self.shpLength:
        yield self.__shape()
def _entryChanged(self, entry):
    """This is called when a log entry is changed.

    Persists the log, rebuilds the entry's tree-widget item in place,
    and refreshes the viewer.
    """
    # resave the log first so the on-disk state reflects the change
    self.purrer.save()
    # redo entry item: remove the stale widget item and re-insert a
    # fresh one at the same position
    if entry.tw_item:
        number = entry.tw_item._ientry
        entry.tw_item = None
        self.etw.takeTopLevelItem(number)
        # anchor after the preceding item, or at the top when this was
        # the first entry (number == 0)
        if number:
            after = self.etw.topLevelItem(number - 1)
        else:
            after = None
        self._addEntryItem(entry, number, after)
    # log will have changed, so update the viewer
    self._updateViewer()
def build(self):
    """Create and start up the internal workers.

    Ensures the stage has at least one output tube, assembles this
    stage's worker pool, then recursively builds every downstream stage.
    """
    # If there's no output tube, it means that this stage
    # is at the end of a fork (hasn't been linked to any stage downstream).
    # Therefore, create one output tube.
    if not self._output_tubes:
        self._output_tubes.append(self._worker_class.getTubeClass()())
    self._worker_class.assemble(
        self._worker_args,
        self._input_tube,
        self._output_tubes,
        self._size,
        self._disable_result,
        self._do_stop_task,
    )
    # Build all downstream stages.
    for stage in self._next_stages:
        stage.build()
def set_idle_priority(pid=None):
    """Put a process into the idle I/O priority class.

    If ``pid`` is omitted, the change applies to the current process.
    """
    target = os.getpid() if pid is None else pid
    idle_priority = lib.IOPRIO_PRIO_VALUE(lib.IOPRIO_CLASS_IDLE, 0)
    lib.ioprio_set(lib.IOPRIO_WHO_PROCESS, target, idle_priority)
def pixelFormat(self):
    """Return a string naming the pixel format of the video stream,
    e.g. ``yuv420p``. Returns None if this is not a video stream.
    """
    # Short-circuit keeps the original behavior: the pix_fmt lookup only
    # happens for video streams.
    if self.isVideo() and self.__dict__['pix_fmt']:
        return self.__dict__['pix_fmt']
    return None
def _cast(self, _input, _output):
    """Transforms a pair of input/output into the real output Bag.

    :param _input: Bag
    :param _output: mixed — either a bare value or an envelope carrying
        flags/options.
    :return: Bag
    """
    # An enveloped output carries explicit flags/options; a bare value
    # gets empty defaults.
    if isenvelope(_output):
        _output, _flags, _options = _output.unfold()
    else:
        _flags, _options = [], {}
    if len(_flags):
        # TODO: parse flags to check constraints are respected (like not modified alone, etc.)
        # F_NOT_MODIFIED: pass the input through untouched.
        if F_NOT_MODIFIED in _flags:
            return _input
        # F_INHERIT: prepend the input to the output; the combined output
        # type is computed once, lazily, from the input/output lengths.
        if F_INHERIT in _flags:
            if self._output_type is None:
                self._output_type = concat_types(self._input_type, self._input_length, self._output_type, len(_output))
            _output = _input + ensure_tuple(_output)
    # Lazily pin the output type the first time a tuple subclass is seen.
    if not self._output_type:
        if issubclass(type(_output), tuple):
            self._output_type = type(_output)
    return ensure_tuple(_output, cls=self._output_type)
def present(name, **kwargs):
    '''
    Ensure a job is present in the schedule

    name
        The unique name that is given to the scheduled job.

    seconds
        The scheduled job will be executed after the specified
        number of seconds have passed.

    minutes
        The scheduled job will be executed after the specified
        number of minutes have passed.

    hours
        The scheduled job will be executed after the specified
        number of hours have passed.

    days
        The scheduled job will be executed after the specified
        number of days have passed.

    when
        This will schedule the job at the specified time(s).
        The when parameter must be a single value or a dictionary
        with the date string(s) using the dateutil format.
        Requires python-dateutil.

    cron
        This will schedule the job at the specified time(s)
        using the crontab format.
        Requires python-croniter.

    run_on_start
        Whether the job will run when Salt minion starts. Value should be
        a boolean.

    function
        The function that should be executed by the scheduled job.

    job_args
        The arguments that will be used by the scheduled job.

    job_kwargs
        The keyword arguments that will be used by the scheduled job.

    maxrunning
        Ensure that there are no more than N copies of a particular job running.

    jid_include
        Include the job into the job cache.

    splay
        The amount of time in seconds to splay a scheduled job.
        Can be specified as a single value in seconds or as a dictionary
        range with 'start' and 'end' values.

    range
        This will schedule the command within the range specified.
        The range parameter must be a dictionary with the date strings
        using the dateutil format. Requires python-dateutil.

    once
        This will schedule a job to run once on the specified date.

    once_fmt
        The default date format is ISO 8601 but can be overridden by
        also specifying the ``once_fmt`` option.

    enabled
        Whether the job should be enabled or disabled. Value should be a boolean.

    return_job
        Whether to return information to the Salt master upon job completion.

    metadata
        Using the metadata parameter special values can be associated with
        a scheduled job. These values are not used in the execution of the job,
        but can be used to search for specific jobs later if combined with the
        return_job parameter. The metadata parameter must be specified as a
        dictionary, otherwise it will be ignored.

    returner
        The returner to use to return the results of the scheduled job.

    return_config
        The alternative configuration to use for returner configuration options.

    return_kwargs
        Any individual returner configuration items to override. Should be passed
        as a dictionary.

    persist
        Whether the job should persist between minion restarts, defaults to True.

    skip_during_range
        This will ensure that the scheduled command does not run within the
        range specified. The range parameter must be a dictionary with the
        date strings using the dateutil format. Requires python-dateutil.

    run_after_skip_range
        Whether the job should run immediately after the skip_during_range time
        period ends.
    '''
    ret = {'name': name, 'result': True, 'changes': {}, 'comment': []}
    current_schedule = __salt__['schedule.list'](show_all=True, return_yaml=False)
    if name in current_schedule:
        # Job already exists: build the candidate item and modify if needed.
        new_item = __salt__['schedule.build_schedule_item'](name, **kwargs)
        # See if the new_item is valid
        if isinstance(new_item, dict):
            if 'result' in new_item and not new_item['result']:
                ret['result'] = new_item['result']
                ret['comment'] = new_item['comment']
                return ret
        # The schedule.list gives us an item that is guaranteed to have an
        # 'enabled' argument. Before comparing, add 'enabled' if it's not
        # available (assume True, like schedule.list does)
        if 'enabled' not in new_item:
            new_item['enabled'] = True
        if new_item == current_schedule[name]:
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            # In test mode, report the would-be modification without applying.
            if 'test' in __opts__ and __opts__['test']:
                kwargs['test'] = True
                result = __salt__['schedule.modify'](name, **kwargs)
                ret['comment'].append(result['comment'])
                ret['changes'] = result['changes']
            else:
                result = __salt__['schedule.modify'](name, **kwargs)
                if not result['result']:
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    ret['comment'].append('Modifying job {0} in schedule'.format(name))
                    ret['changes'] = result['changes']
    else:
        # Job does not exist yet: add it (or simulate the add in test mode).
        if 'test' in __opts__ and __opts__['test']:
            kwargs['test'] = True
            result = __salt__['schedule.add'](name, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['schedule.add'](name, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Adding new job {0} to schedule'.format(name))
    # Collapse the accumulated comment lines into one string.
    ret['comment'] = '\n'.join(ret['comment'])
    return ret
def get_service_state(self, service_id: str) -> str:
    """Get the state of the service.

    Only the manager nodes can retrieve service state.

    Args:
        service_id (str): Service id

    Returns:
        str, the ``DesiredState`` of the service's first task, or None
        when the service currently has no tasks.
    """
    # Get service
    service = self._client.services.get(service_id)
    # The previous loop returned on its first iteration, so only the
    # first task's state was ever reported; make that explicit and
    # avoid an implicit None fall-through surprise on empty task lists.
    tasks = service.tasks()
    if not tasks:
        return None
    return tasks[0]['DesiredState']
def diff(ctx, branch):
    """Determine which tests intersect a git diff."""
    reporter = GitDiffReporter(branch)
    changed = reporter.changed_intervals()
    _report_from_regions(changed, ctx.obj, file_factory=reporter.old_file)
def build(self, builder):
    """Build XML by appending to builder.

    Emits a ``MeasurementUnit`` element with OID/Name attributes, the
    optional unit dictionary name, the A/B/C/K conversion constants,
    the optional StandardUnit marker, and one child per symbol.
    """
    attrs = {"OID": self.oid, "Name": self.name}
    if self.unit_dictionary_name:
        attrs["mdsol:UnitDictionaryName"] = self.unit_dictionary_name
    for suffix in ("A", "B", "C", "K"):
        constant = getattr(self, "constant_{0}".format(suffix.lower()))
        attrs["mdsol:Constant{0}".format(suffix)] = str(constant)
    if self.standard_unit:
        attrs["mdsol:StandardUnit"] = "Yes"
    builder.start("MeasurementUnit", attrs)
    for symbol in self.symbols:
        symbol.build(builder)
    builder.end("MeasurementUnit")
def register_client(provider_info, redirect_uris):
    """This function registers a new client with the specified OpenID Provider,
    and then returns the registered client ID and other information.

    :param provider_info: The contents of the discovery endpoint as
        specified by the OpenID Connect Discovery 1.0 specifications.
    :type provider_info: dict
    :param redirect_uris: The redirect URIs the application wants to
        register.
    :type redirect_uris: list
    :returns: An object containing the information needed to configure the
        actual client code to communicate with the OpenID Provider.
    :rtype: dict
    :raises ValueError: The same error as used by check_redirect_uris.
    :raises RegistrationError: Indicates an error was returned by the OpenID
        Provider during registration.

    .. versionadded:: 1.0
    """
    # Determine the application type from the redirect URIs (validated by
    # check_redirect_uris, which may raise ValueError).
    client_type = check_redirect_uris(redirect_uris)
    submit_info = {'redirect_uris': redirect_uris, 'application_type': client_type, 'token_endpoint_auth_method': 'client_secret_post'}
    headers = {'Content-type': 'application/json'}
    resp, content = httplib2.Http().request(provider_info['registration_endpoint'], 'POST', json.dumps(submit_info), headers=headers)
    # NOTE(review): the docstring promises RegistrationError, but plain
    # Exception is raised on both failure paths below — confirm intent.
    if int(resp['status']) >= 400:
        raise Exception('Error: the server returned HTTP ' + resp['status'])
    client_info = _json_loads(content)
    if 'error' in client_info:
        raise Exception('Error occured during registration: %s (%s)' % (client_info['error'], client_info.get('error_description')))
    # Shape the result like a client-secrets style 'web' configuration blob.
    json_file = {'web': {'client_id': client_info['client_id'], 'client_secret': client_info['client_secret'], 'auth_uri': provider_info['authorization_endpoint'], 'token_uri': provider_info['token_endpoint'], 'userinfo_uri': provider_info['userinfo_endpoint'], 'redirect_uris': redirect_uris, 'issuer': provider_info['issuer'], }}
    return json_file
def run(self, module, options):
    """Run the operator.

    :param module: The target module path.
    :type module: ``str``
    :param options: Any runtime options.
    :type options: ``dict``
    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running CC harvester")
    results = {}
    for filename, details in dict(self.harvester.results).items():
        results[filename] = {}
        total = 0
        # running CC total
        for instance in details:
            if isinstance(instance, Class):
                i = self._dict_from_class(instance)
            elif isinstance(instance, Function):
                i = self._dict_from_function(instance)
            else:
                if isinstance(instance, str) and instance == "error":
                    # NOTE(review): '(unknown)' looks like it was meant to be
                    # the filename, and details['error'] assumes details is a
                    # mapping here — confirm against the harvester's output.
                    logger.warning(f"Failed to run CC harvester on (unknown) : {details['error']}")
                    continue
                else:
                    logger.warning(f"Unexpected result from Radon : {instance} of {type(instance)}. Please report on Github.")
                    continue
            results[filename][i["fullname"]] = i
            # ``i`` is the same object just stored in results, so deleting the
            # key here also removes it from the stored entry.
            del i["fullname"]
            total += i["complexity"]
        results[filename]["complexity"] = total
    return results
def convert_tuple_to_integer(numbers):
    """This function transforms a tuple of positive integers into a single
    integer by concatenating the decimal digits of its elements.

    >>> convert_tuple_to_integer((1, 2, 3))
    123
    >>> convert_tuple_to_integer((4, 5, 6))
    456
    >>> convert_tuple_to_integer((5, 6, 7))
    567
    """
    return int(''.join(map(str, numbers)))
def add_user(self, user, **kwargs):
    """Add a user to this team.

    :param user: either a ``User`` object (its ``'id'`` entry is used)
        or a user id string.
    :param kwargs: forwarded as query parameters on the API request.
    :return: the response of the ``PUT`` request.
    """
    # Accept a full User object by collapsing it to its id.
    if isinstance(user, User):
        user = user['id']
    # NOTE(review): asserts are stripped under ``python -O``, so this
    # validation can silently disappear — consider raising TypeError.
    assert isinstance(user, six.string_types)
    endpoint = '{0}/{1}/users/{2}'.format(self.endpoint, self['id'], user,)
    result = self.request('PUT', endpoint=endpoint, query_params=kwargs)
    return result
def _verify(self, request, return_payload=False, verify=True, raise_missing=False, request_args=None, request_kwargs=None, *args, **kwargs):
    """Verify that a request object is authenticated.

    :param request: the incoming request carrying the token (header,
        cookie, or query argument).
    :param return_payload: when True, return only the decoded payload
        (or None) instead of the (is_valid, status, reason) triple.
    :param verify: when False, decode the token without running
        signature/claim verification.
    :param raise_missing: re-raise the missing-token exception instead of
        folding it into the returned status/reason.
    :return: the decoded payload when ``return_payload`` is True;
        otherwise a ``(is_valid, status, reason)`` tuple.
    """
    # Step 1: extract the token; a missing token is recorded (or
    # re-raised) rather than treated as a decode failure.
    try:
        token = self._get_token(request)
        is_valid = True
        reason = None
    except (exceptions.MissingAuthorizationCookie, exceptions.MissingAuthorizationQueryArg, exceptions.MissingAuthorizationHeader,) as e:
        token = None
        is_valid = False
        reason = list(e.args)
        # Outside debug mode, hide the specific status behind a generic 401.
        status = e.status_code if self.config.debug() else 401
        if raise_missing:
            if not self.config.debug():
                e.status_code = 401
            raise e
    # Step 2: decode and (optionally) verify the token's claims.
    if token:
        try:
            payload = self._decode(token, verify=verify)
            if verify:
                if self._extra_verifications:
                    self._verify_extras(payload)
                if self._custom_claims:
                    self._verify_custom_claims(payload)
        except (jwt.exceptions.ExpiredSignatureError, jwt.exceptions.InvalidIssuerError, jwt.exceptions.ImmatureSignatureError, jwt.exceptions.InvalidIssuedAtError, jwt.exceptions.InvalidAudienceError, InvalidVerificationError, InvalidCustomClaimError,) as e:
            # Make sure that the reasons all end with '.' for consistency
            reason = [x if x.endswith(".") else "{}.".format(x) for x in list(e.args)]
            payload = None
            status = 401
            is_valid = False
        except jwt.exceptions.DecodeError as e:
            self._reasons = e.args
            # Make sure that the reasons all end with '.' for consistency
            reason = ([x if x.endswith(".") else "{}.".format(x) for x in list(e.args)] if self.config.debug() else "Auth required.")
            logger.debug(e.args)
            is_valid = False
            payload = None
            status = 400 if self.config.debug() else 401
    else:
        payload = None
    if return_payload:
        return payload
    # A valid token always maps to 200; otherwise keep the failure status.
    status = 200 if is_valid else status
    return is_valid, status, reason
def update_hosts(self, host_names):
    """Primarily for puppet-unity use.

    Update the hosts for the lun if needed.

    :param host_names: specify the new hosts which access the LUN.
    :return: the modification response, or None when the requested hosts
        already match the current ones.
    """
    if self.host_access:
        curr_hosts = [access.host.name for access in self.host_access]
    else:
        curr_hosts = []
    # Order-insensitive comparison: skip the modify call when the host
    # set is unchanged.
    if set(curr_hosts) == set(host_names):
        log.info('Hosts for updating is equal to current hosts, ' 'skip modification.')
        return None
    # Resolve each host name to its first matching host object.
    new_hosts = [UnityHostList.get(cli=self._cli, name=host_name)[0] for host_name in host_names]
    new_access = [{'host': item, 'accessMask': HostLUNAccessEnum.PRODUCTION} for item in new_hosts]
    resp = self.modify(host_access=new_access)
    resp.raise_if_err()
    return resp
def _parse_nexus_vni_range(self, tunnel_range):
    """Raise an exception for invalid tunnel range or malformed range.

    Validates each endpoint as a Nexus VNI, then checks the endpoints
    are properly ordered.
    """
    for vni in tunnel_range:
        if not self._is_valid_nexus_vni(vni):
            raise exc.NetworkTunnelRangeError(tunnel_range=tunnel_range, error=_("%(id)s is not a valid Nexus VNI value.") % {'id': vni})
    range_start, range_end = tunnel_range[0], tunnel_range[1]
    if range_end < range_start:
        raise exc.NetworkTunnelRangeError(tunnel_range=tunnel_range, error=_("End of tunnel range is less than start of " "tunnel range."))
def _normalize_images ( self ) :
"""normalizes image filenames by prepending ' File : ' if needed""" | if 'image' not in self . data :
return
for img in self . data [ 'image' ] :
fname = img [ 'file' ] . replace ( '_' , ' ' )
fstart = fname . startswith ( 'File:' )
istart = fname . startswith ( 'Image:' )
if not fstart and not istart :
fname = 'File:' + fname
img [ 'orig' ] = img [ 'file' ]
img [ 'file' ] = fname |
def get_read_format(cls, source, args, kwargs):
    """Determine the read format for a given input source.

    :param cls: the class whose registered readers are consulted.
    :param source: an already-open file-like object, or a path/URL.
    :param args: positional arguments forwarded to format detection.
    :param kwargs: keyword arguments forwarded to format detection.
    :return: whatever ``get_format('read', ...)`` resolves to.
    """
    ctx = None
    if isinstance(source, FILE_LIKE):
        # Already a file object; reuse it and recover a path if it has one.
        fileobj = source
        filepath = source.name if hasattr(source, 'name') else None
    else:
        filepath = source
        try:
            # Enter the context manager manually; it is exited in the
            # finally clause below once format detection is done.
            ctx = get_readable_fileobj(filepath, encoding='binary')
            fileobj = ctx.__enter__()
            # pylint: disable=no-member
        except IOError:
            # Genuine I/O failures propagate to the caller unchanged.
            raise
        except Exception:  # pylint: disable=broad-except
            # Anything else (e.g. unsupported scheme) falls back to
            # detection by file path alone.
            fileobj = None
    try:
        return get_format('read', cls, filepath, fileobj, args, kwargs)
    finally:
        # Close the manually-entered context, forwarding any in-flight
        # exception info so __exit__ sees it.
        if ctx is not None:
            ctx.__exit__(*sys.exc_info())
def create_data_descriptor(self, is_sequence: bool, collection_dimension_count: int, datum_dimension_count: int) -> DataAndMetadata.DataDescriptor:
    """Create a data descriptor.

    :param is_sequence: whether the descriptor describes a sequence of data.
    :param collection_dimension_count: the number of collection dimensions represented by the descriptor.
    :param datum_dimension_count: the number of datum dimensions represented by the descriptor.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    descriptor = DataAndMetadata.DataDescriptor(is_sequence, collection_dimension_count, datum_dimension_count)
    return descriptor
def get_weights_as_images(weights_npy, width, height, outdir='img/', n_images=10, img_type='grey'):
    """Create and save the weights of the hidden units as images.

    :param weights_npy: path to the weights .npy file
    :param width: width of the images
    :param height: height of the images
    :param outdir: output directory; used as a literal string prefix, so
        it should end with a path separator
    :param n_images: number of images to generate
    :param img_type: 'grey' or 'color' (RGB)
    """
    weights = np.load(weights_npy)
    # Pick up to n_images distinct hidden units at random.
    perm = np.random.permutation(weights.shape[1])[:n_images]
    for p in perm:
        # Column p holds hidden unit p's weight vector. Slicing yields the
        # same values as gathering weights[i][p] row-by-row in Python, but
        # in one vectorized operation.
        w = weights[:, p]
        image_path = outdir + 'w_{}.png'.format(p)
        gen_image(w, width, height, image_path, img_type)
async def fetch_guild(self, guild_id):
    """|coro|

    Retrieves a :class:`.Guild` from an ID.

    .. note::

        Using this, you will not receive :attr:`.Guild.channels`, :class:`.Guild.members`,
        :attr:`.Member.activity` and :attr:`.Member.voice` per :class:`.Member`.

    .. note::

        This method is an API call. For general usage, consider :meth:`get_guild` instead.

    Parameters
    ----------
    guild_id: :class:`int`
        The guild's ID to fetch from.

    Raises
    ------
    Forbidden
        You do not have access to the guild.
    HTTPException
        Getting the guild failed.

    Returns
    -------
    :class:`.Guild`
        The guild from the ID.
    """
    payload = await self.http.get_guild(guild_id)
    return Guild(data=payload, state=self._connection)
def gettext(ui_file_path):
    """Let you use gettext instead of the Qt tools for l18n.

    Rewrites the generated ``.ui`` Python module in place: Qt's
    ``_translate("context", ...)`` calls become plain gettext ``_(...)``
    calls, and the ``_translate`` assignment line is dropped.

    :param ui_file_path: path of the generated Python file to rewrite.
    """
    with open(ui_file_path, 'r') as fin:
        content = fin.read()
    # replace ``_translate("context", `` by ``_(``. The context is matched
    # with [^"]* rather than greedy .* — a greedy match would swallow
    # everything up to the *last* call on a line containing several
    # _translate(...) invocations and corrupt the output.
    content = re.sub(r'_translate\("[^"]*",\s', '_(', content)
    content = content.replace(' _translate = QtCore.QCoreApplication.translate', '')
    with open(ui_file_path, 'w') as fout:
        fout.write(content)
def get_processor(self, processor_type: "Type[P]") -> "P":
    """Get a Processor instance, by type.

    This method returns a Processor instance by type. This could be
    useful in certain situations, such as wanting to call a method on a
    Processor, from within another Processor.

    :param processor_type: The type of the Processor you wish to retrieve.
    :return: A Processor instance that has previously been added to the
        World, or None when no processor of that exact type exists.
    """
    for processor in self._processors:
        # Exact type match — subclasses are intentionally excluded; ``is``
        # is the idiomatic identity test for type objects (not ``==``).
        if type(processor) is processor_type:
            return processor
    # Explicit None keeps the original fall-through behavior visible.
    return None
def locations_to_cache(locations, latest=False):
    """Return a cumulative cache file build from the list of locations

    Parameters
    ----------
    locations : list
        A list of strings containing files, globs, or cache files used to
        build a combined lal cache file object.
    latest : Optional, {False, Boolean}
        Only return a cache with the most recent frame in the locations.
        If false, all results are returned.

    Returns
    -------
    cache : lal.Cache
        A cumulative lal cache object containing the files derived from the
        list of locations.
    """
    cum_cache = lal.Cache()
    for source in locations:
        flist = glob.glob(source)
        if latest:
            def relaxed_getctime(fn):
                # when building a cache from a directory of temporary
                # low-latency frames, files might disappear between
                # the glob() and getctime() calls
                try:
                    return os.path.getctime(fn)
                except OSError:
                    return 0
            # Keep only the newest file matched by this glob.
            flist = [max(flist, key=relaxed_getctime)]
        for file_path in flist:
            dir_name, file_name = os.path.split(file_path)
            _, file_extension = os.path.splitext(file_name)
            # Dispatch on extension: cache files import directly; frame
            # files are opened to extract their embedded cache.
            if file_extension in [".lcf", ".cache"]:
                cache = lal.CacheImport(file_path)
            elif file_extension == ".gwf" or _is_gwf(file_path):
                cache = lalframe.FrOpen(str(dir_name), str(file_name)).cache
            else:
                raise TypeError("Invalid location name")
            cum_cache = lal.CacheMerge(cum_cache, cache)
    return cum_cache
def new_backup(self, src):
    """Create a new backup file allocation.

    Reads (or initializes) a persistent backup counter, reserves the
    next numbered backup name for ``src``, advances the counter on disk,
    and returns the full path of the allocated backup file.
    """
    index_path = p.join(self.backup_dir, '.bk_idx')
    current_index = file_or_default(index_path, 1, int)
    backup_name = str(current_index) + "_" + os.path.basename(src)
    file_put_contents(index_path, str(current_index + 1))
    return p.join(self.backup_dir, backup_name)
def permission_required(perm, queryset_or_model=None, login_url=None, raise_exception=False):
    """Permission check decorator for classbased/functional generic view

    This decorator works as class, method or function decorator without any
    modification.
    DO NOT use ``method_decorator`` or whatever while this decorator will use
    ``self`` argument for method of classbased generic view.

    Parameters
    ----------
    perm : string
        A permission string
    queryset_or_model : queryset or model
        A queryset or model for finding object.
        With classbased generic view, ``None`` for using view default queryset.
        When the view does not define ``get_queryset``, ``queryset``,
        ``get_object``, or ``object`` then ``obj=None`` is used to check
        permission.
        With functional generic view, ``None`` for using passed queryset.
        When non queryset was passed then ``obj=None`` is used to check
        permission.

    Examples
    --------
    >>> # As class decorator
    >>> @permission_required('auth.change_user')
    >>> class UpdateAuthUserView(UpdateView):
    ...     pass
    >>> # As method decorator
    >>> class UpdateAuthUserView(UpdateView):
    ...     @permission_required('auth.change_user')
    ...     def dispatch(self, request, *args, **kwargs):
    ...         pass
    >>> # As function decorator
    >>> @permission_required('auth.change_user')
    >>> def update_auth_user(request, *args, **kwargs):
    ...     pass

    .. Note::
        Classbased generic view is recommended while you can regulate the queryset
        with ``get_queryset()`` method.
        Detecting object from passed kwargs may not work correctly.
    """
    # Convert a model class to a queryset. The isinstance(..., type) guard
    # matters: calling issubclass() on a queryset *instance* raises
    # TypeError, yet the documented contract allows passing a queryset —
    # instances are now passed through untouched.
    if queryset_or_model and isinstance(queryset_or_model, type) and issubclass(queryset_or_model, Model):
        queryset_or_model = queryset_or_model._default_manager.all()
    def wrapper(class_or_method):
        if inspect.isclass(class_or_method):
            from permission.decorators.classbase import permission_required as decorator
        else:
            # method_permission_required can handle method or function
            # correctly.
            from permission.decorators.methodbase import permission_required as decorator
        return decorator(perm, queryset_or_model, login_url, raise_exception)(class_or_method)
    return wrapper
def start(self, blocking=False):
    """Start the interface.

    :param blocking: Should the call block until stop() is called
        (default: False)
    :type blocking: bool
    :rtype: None
    :raises PluginStartException: if any plugin fails to start.
    """
    self.debug("()")
    # Start the plugins
    for plugin in self.plugins:
        try:
            # Inject self into plugin
            plugin.controller = self
            plugin.start(blocking=False)
        except Exception:
            # A bare ``except:`` here would also trap SystemExit and
            # KeyboardInterrupt; catching Exception lets interpreter-level
            # signals propagate while still reporting plugin failures.
            self.exception(u"Failed to start plugin {}".format(plugin.name))
            raise PluginStartException("Starting one or more plugins failed")
    super(CrowdController, self).start(blocking=blocking)
def get_default(cls, category):
    """Return the default value registered for *category*.

    If the stored default is a non-empty list, its first element is
    returned; otherwise the stored value is returned unchanged.
    """
    entry = cls._DEFAULTS[category]
    if isinstance(entry, list) and entry:
        return entry[0]
    return entry
def route_method(method_name, extra_part=False):
    """Custom handler routing decorator.

    Signs a web handler callable with the HTTP method as an attribute.

    Args:
        method_name (str): HTTP method name (i.e. GET, POST).
        extra_part (bool): Indicates if the wrapped callable name should
            be a part of the actual endpoint.

    Returns:
        A wrapped handler callable, exposed as a classmethod.
    """
    def wrapper(callable_obj):
        # Reject HTTP verbs the framework does not route.
        if method_name.lower() not in DEFAULT_ROUTES:
            raise HandlerHTTPMethodError('Invalid http method in method: {}'.format(method_name))
        callable_obj.http_method = method_name.upper()
        if extra_part:
            callable_obj.url_extra_part = callable_obj.__name__
        else:
            callable_obj.url_extra_part = None
        return classmethod(callable_obj)
    return wrapper
def calc_inuz_v1(self):
    """Accumulate the total inflow into the upper zone layer.

    Required control parameters: |NmbZones|, |ZoneType|
    Required derived parameter: |RelLandZoneArea|
    Required flux sequences: |R|, |CF|
    Calculated flux sequence: |InUZ|

    Basic equation: :math:`InUZ = R - CF`

    Examples:

        >>> from hydpy.models.hland import *
        >>> parameterstep('1d')
        >>> nmbzones(3)
        >>> zonetype(FIELD, ILAKE, GLACIER)
        >>> derived.rellandzonearea = 2.0/3.0, 0.0, 1.0/3.0
        >>> fluxes.r = 6.0, 0.0, 2.0
        >>> fluxes.cf = 2.0, 0.0, 1.0
        >>> model.calc_inuz_v1()
        >>> fluxes.inuz
        inuz(3.0)

        Internal lakes do not contribute to the upper zone layer, so a
        subbasin consisting only of internal lakes yields zero input:

        >>> zonetype(ILAKE, ILAKE, ILAKE)
        >>> model.calc_inuz_v1()
        >>> fluxes.inuz
        inuz(0.0)
    """
    # Shortcut handles into the model's fast-access parameter/sequence views.
    con = self.parameters.control.fastaccess
    der = self.parameters.derived.fastaccess
    flu = self.sequences.fluxes.fastaccess
    # Reset the accumulator before summing over all zones.
    flu.inuz = 0.
    for k in range(con.nmbzones):
        # Internal lakes (ILAKE) do not feed the upper zone layer.
        if con.zonetype[k] != ILAKE:
            # Weight each zone's net input (R - CF) by its relative land area.
            flu.inuz += der.rellandzonearea[k] * (flu.r[k] - flu.cf[k])
def listdir(directory):
    """Return the names of entries directly inside a local directory.

    Directory entries are suffixed with the platform path separator so
    callers can distinguish them from plain files.

    :param directory: absolute or relative path to a local directory
    :return: list of file names and separator-suffixed directory names
    """
    file_names = []
    for entry in os.listdir(directory):
        # Mark directories by appending the OS path separator to the
        # entry name (the name itself must be kept, not replaced).
        if os.path.isdir(os.path.join(directory, entry)):
            entry = '{}{}'.format(entry, os.path.sep)
        file_names.append(entry)
    return file_names
def retrieve_log_trace(self, filename=None, dir=None):
    """Retrieve the application log and trace files of the job and save
    them as a compressed tar file. An existing file with the same name
    is overwritten.

    Args:
        filename (str): name of the created tar file. Defaults to
            ``job_<id>_<timestamp>.tar.gz`` where ``id`` is the job
            identifier and ``timestamp`` is seconds since the Unix epoch.
        dir (str): a valid directory in which to save the archive.
            Defaults to the current directory.

    Returns:
        str: the path to the created tar file, or ``None`` if retrieving
        a job's logs is not supported by the IBM Streams version.

    .. versionadded:: 1.8
    """
    # The REST attribute is only present when the instance supports
    # log retrieval.
    if hasattr(self, "applicationLogTrace") and self.applicationLogTrace is not None:
        logger.debug("Retrieving application logs from: " + self.applicationLogTrace)
        if not filename:
            # Default name: job_<id>_<timestamp>.tar.gz
            filename = _file_name('job', self.id, '.tar.gz')
        return self.rest_client._retrieve_file(self.applicationLogTrace, filename, dir, 'application/x-compressed')
    else:
        # Not supported by this Streams version.
        return None
def json_register(self):
    '''Validate posted registration data and create the user, replying
    with a JSON status object.

    The status ``code`` encodes the failing field and reason:
    first char — '1' user_name, '2' user_email, '3' user_pass,
    '4' user_role; second char — '1' invalid, '2' already exists.
    '''
    # user_create_status = {'success': False, 'code': '00'}
    post_data = self.get_post_data()
    # Field-level validation happens before the form check.
    user_create_status = self.__check_valid(post_data)
    if not user_create_status['success']:
        # NOTE(review): json.dump streams the JSON via the handler's
        # write() method — confirm this is the intended response path.
        return json.dump(user_create_status, self)
    # NOTE(review): SumForm looks like a CSRF/anti-bot form; confirm it
    # is the intended form class for registration.
    form = SumForm(self.request.arguments)
    if form.validate():
        user_create_status = MUser.create_user(post_data)
        logger.info('user_register_status: {0}'.format(user_create_status))
        return json.dump(user_create_status, self)
    return json.dump(user_create_status, self)
def create_initialized_contract_account ( self , contract_code , storage ) -> None :
"""Creates a new contract account , based on the contract code and
storage provided The contract code only includes the runtime contract
bytecode .
: param contract _ code : Runtime bytecode for the contract
: param storage : Initial storage for the contract
: return : The new account""" | # TODO : Add type hints
new_account = Account ( self . _generate_new_address ( ) , code = contract_code , balance = 0 )
new_account . storage = storage
self . _put_account ( new_account ) |
def remove_actor(self, actor, reset_camera=False):
    """Remove an actor from the Renderer.

    Parameters
    ----------
    actor : str, vtk.vtkActor, or iterable of actors
        Actor (or its registered name, or an iterable of actors)
        previously added to the Renderer.
    reset_camera : bool, optional
        Resets camera so all actors can be seen.

    Returns
    -------
    success : bool
        True when the actor was removed, False when it was not present.
    """
    name = None
    if isinstance(actor, str):
        name = actor
        # Remove every actor whose key is prefixed with this name first.
        keys = list(self._actors.keys())
        names = []
        for k in keys:
            if k.startswith('{}-'.format(name)):
                names.append(k)
        if len(names) > 0:
            self.remove_actor(names, reset_camera=reset_camera)
        try:
            actor = self._actors[name]
        except KeyError:
            # If actor of that name is not present then return success
            return False
    # collections.Iterable was removed in Python 3.10; the abc module
    # is the correct home for the Iterable ABC.
    if isinstance(actor, collections.abc.Iterable):
        success = False
        for a in actor:
            rv = self.remove_actor(a, reset_camera=reset_camera)
            if rv or success:
                success = True
        return success
    if actor is None:
        return False
    # First remove this actor's mapper from _scalar_bar_mappers
    _remove_mapper_from_plotter(self.parent, actor, False)
    self.RemoveActor(actor)
    if name is None:
        # Look up the key under which this actor was registered.
        for k, v in self._actors.items():
            if v == actor:
                name = k
    self._actors.pop(name, None)
    self.update_bounds_axes()
    if reset_camera:
        self.reset_camera()
    elif not self.camera_set and reset_camera is None:
        self.reset_camera()
    else:
        self.parent._render()
    return True
def do_complete(self, code, cursor_pos):
    """Method called on autocompletion requests.

    Completes the token under the cursor against magic names (when the
    token is a magic) or against known SPARQL keyword names otherwise.
    """
    self._klog.info("{%s}", code[cursor_pos:cursor_pos + 10])
    token, start = token_at_cursor(code, cursor_pos)
    tkn_low = token.lower()
    if is_magic(token, start, code):
        # Complete against magic command names.
        matches = [k for k in magics.keys() if k.startswith(tkn_low)]
    else:
        # Complete against SPARQL keywords (mapped to display names).
        matches = [sparql_names[k] for k in sparql_names if k.startswith(tkn_low)]
    self._klog.debug("token={%s} matches={%r}", token, matches)
    if matches:
        return {'status': 'ok', 'cursor_start': start, 'cursor_end': start + len(token), 'matches': matches}
    # NOTE(review): falls through to an implicit None when there are no
    # matches — confirm the kernel protocol accepts that instead of an
    # empty completion reply.
def submit_report(self, report):
    """Submit a report.

    * If ``report.is_enclave`` is ``True``, the report is submitted to
      the enclaves identified by ``report.enclaves``; if that field is
      ``None``, the enclave IDs registered with this TruStar object are
      used.
    * If ``report.time_began`` is ``None``, the current time is used.

    :param report: The Report object to submit; its ``id`` field is
        updated from the response.
    :return: The submitted Report.
    """
    # Distribution type defaults to "enclave".
    if report.is_enclave is None:
        report.is_enclave = True
    if report.enclave_ids is None:
        # Fall back to the configured enclave ids for enclave reports;
        # COMMUNITY reports still need a (possibly empty) list.
        report.enclave_ids = self.enclave_ids if report.is_enclave else []
    if report.is_enclave and len(report.enclave_ids) == 0:
        raise Exception("Cannot submit a report of distribution type 'ENCLAVE' with an empty set of enclaves.")
    # Default "time began" is now.
    if report.time_began is None:
        report.set_time_began(datetime.now())
    payload = json.dumps(report.to_dict())
    response = self._client.post("reports", data=payload, timeout=60)
    # The response body is the new report id.
    new_id = response.content
    if isinstance(new_id, bytes):
        new_id = new_id.decode('utf-8')
    report.id = new_id
    return report
def resume(self):
    """Resume the thread execution.

    @rtype: int
    @return: Suspend count. If zero, the thread is running.
    """
    thread_handle = self.get_handle(win32.THREAD_SUSPEND_RESUME)
    return win32.ResumeThread(thread_handle)
def group_paragraphs(indent_paragraphs):
    '''Group paragraphs so that more indented paragraphs become children
    of less indented paragraphs.

    The tree consists of Node(indent, children, parent) objects; children
    may be strings or further nodes.
    '''
    root = Node(0, [], None)
    node = root
    last_indent = -1
    for indent, lines in indent_paragraphs:
        if indent > last_indent:
            # Deeper indent: descend.
            node = create_child_node(node, indent, lines)
        elif indent < last_indent:
            # Shallower indent: climb back up.
            node = create_uncle_node(node, indent, lines)
        else:
            # Same indent: stay at the current level.
            node = create_sibling_node(node, indent, lines)
        last_indent = indent
    return root
def get_controller_info_records(self):
    """Create fresh info records for all controller objects in the
    manager.

    New records are created on every call so the latest info is
    included.

    Returns:
        list of records.ControllerInfoRecord objects, one per
        controller type.
    """
    collected = []
    for module_name in self._controller_objects:
        # A failing controller must not abort collection for the rest.
        with expects.expect_no_raises('Failed to collect controller info from %s' % module_name):
            record = self._create_controller_info_record(module_name)
            if record:
                collected.append(record)
    return collected
def incr(self, conn, key, increment=1):
    """Increment the value stored for an item in place.

    The item's data is treated as the decimal representation of a
    64-bit unsigned integer.

    :param key: ``bytes``, the key of the item the client wishes
        to change
    :param increment: ``int``, the amount by which the client wants
        to increase the item
    :return: ``int``, new value of the item's data after the
        increment, or ``None`` if the item was not found
    """
    # Reject malformed keys before talking to the server.
    assert self._validate_key(key)
    # Delegate to the shared incr/decr implementation with the b'incr' verb.
    resp = yield from self._incr_decr(conn, b'incr', key, increment)
    return resp
def load_from_string(self, content, container, **kwargs):
    """Load config from the given string ``content``.

    :param content: Config content string
    :param container: callable to make a container object later
    :param kwargs: optional keyword parameters to be sanitized :: dict
    :return: Dict-like object holding config parameters
    """
    # Wrap the string in a file-like object and reuse the stream loader.
    stream = anyconfig.compat.StringIO(content)
    return self.load_from_stream(stream, container, **kwargs)
def secgroup_delete(name, profile=None, **kwargs):
    '''Delete a secgroup from nova (nova secgroup-delete).

    CLI Example:

    .. code-block:: bash

        salt '*' nova.secgroup_delete mygroup
    '''
    connection = _auth(profile, **kwargs)
    return connection.secgroup_delete(name)
def set_to_restart(self, instance):
    """Put an instance into the restart queue.

    :param instance: instance to restart
    :type instance: object
    :return: None
    """
    self.to_restart.append(instance)
    if instance.is_external:
        # External daemons get a fresh process on restart, so drop the
        # stale process handle.
        instance.proc = None
def set_logger(self, logger):
    """Subscribe to Fortran log messages.

    Wires the module-level Fortran logging callback into the loaded
    library, when the library implements ``set_logger``.
    """
    # We don't expect anything back.
    try:
        self.library.set_logger.restype = None
    except AttributeError:
        # Logger.warn is deprecated in favour of Logger.warning.
        logger.warning("Tried to set logger but method is not implemented in %s", self.engine)
        return
    # As an argument we need a pointer to a fortran log function...
    self.library.set_logger.argtypes = [(fortran_log_functype)]
    self.library.set_logger(fortran_log_func)
def NewFont(familyName=None, styleName=None, showInterface=True):
    """Create a new font.

    **familyName** is assigned to ``font.info.familyName`` and
    **styleName** to ``font.info.styleName``; both default to ``None``.
    When **showInterface** is ``False`` the font is created without a
    graphical interface. The default for **showInterface** is ``True``.

        from fontParts.world import *

        font = NewFont()
        font = NewFont(familyName="My Family", styleName="My Style")
        font = NewFont(showInterface=False)
    """
    # Delegate to the environment-registered factory.
    factory = dispatcher["NewFont"]
    return factory(familyName=familyName, styleName=styleName, showInterface=showInterface)
def filterMsgs ( self , wrappedMsgs : deque ) -> deque :
"""Filters messages by view number so that only the messages that have the
current view number are retained .
: param wrappedMsgs : the messages to filter""" | filtered = deque ( )
while wrappedMsgs :
wrappedMsg = wrappedMsgs . popleft ( )
msg , sender = wrappedMsg
if hasattr ( msg , f . VIEW_NO . nm ) :
reqViewNo = getattr ( msg , f . VIEW_NO . nm )
if reqViewNo == self . viewNo :
filtered . append ( wrappedMsg )
else :
self . discard ( wrappedMsg , "its view no {} is less than the elector's {}" . format ( reqViewNo , self . viewNo ) , logger . debug )
else :
filtered . append ( wrappedMsg )
return filtered |
def variables ( self ) -> tuple :
"""Variables .""" | try :
assert self . _variables is not None
except ( AssertionError , AttributeError ) :
self . _variables = [ self [ n ] for n in self . variable_names ]
finally :
return tuple ( self . _variables ) |
def filter_kwargs(_function, *args, **kwargs):
    """Call ``_function`` with ``args`` and only those ``kwargs`` it
    accepts.

    Equivalent to redefining the function with an additional ``**kwargs``
    that swallows unknown keyword arguments. If the target function
    already accepts ``**kwargs``, no filtering is performed.

    Parameters
    ----------
    _function : callable
        Function to call. Can take in any number of args or kwargs.
    """
    if has_kwargs(_function):
        return _function(*args, **kwargs)
    # Names of the positional/keyword parameters the function declares.
    code_obj = six.get_function_code(_function)
    accepted = code_obj.co_varnames[:code_obj.co_argcount]
    # Keep only the keyword arguments the function actually accepts.
    filtered = {name: value for name, value in kwargs.items() if name in accepted}
    return _function(*args, **filtered)
def end(self):
    """Record the end of a timed operation.

    Must be called after the operation returns; invoked by the
    :class:`~zhmcclient.Session` implementation, not by the user.

    When the owning statistics keeper is enabled, takes the current
    time, computes the duration since the matching
    :meth:`~zhmcclient.TimeStats.begin` call, and updates count, sum,
    min and max. When the keeper is disabled, does nothing.

    Raises:
        RuntimeError: if called without a preceding
            :meth:`~zhmcclient.TimeStats.begin`.
    """
    if not self.keeper.enabled:
        # Disabled keeper: skip all bookkeeping to save resources.
        return
    if self._begin_time is None:
        raise RuntimeError("end() called without preceding begin()")
    duration = time.time() - self._begin_time
    self._begin_time = None
    self._count += 1
    self._sum += duration
    self._max = max(self._max, duration)
    self._min = min(self._min, duration)
def generate_rpcs(self, address):
    """Generate the RPCs needed to stream this config variable to a tile.

    Args:
        address (int): The address of the tile that we should stream to.

    Returns:
        list of tuples: Argument tuples, one per RPC, suitable for
        passing to EmulatedDevice.rpc.
    """
    # Skip the 2-byte header, then stream the payload in 16-byte chunks;
    # each RPC carries the chunk offset relative to the payload start.
    return [
        (address, rpcs.SET_CONFIG_VARIABLE, self.var_id, offset - 2, self.data[offset:offset + 16])
        for offset in range(2, len(self.data), 16)
    ]
def getAttribute ( self , attr : str ) -> _AttrValueType :
"""Get attribute of this node as string format .
If this node does not have ` ` attr ` ` , return None .""" | if attr == 'class' :
if self . classList :
return self . classList . toString ( )
return None
attr_node = self . getAttributeNode ( attr )
if attr_node is None :
return None
return attr_node . value |
def get_wordlist(stanzas):
    """Return the sorted, de-duplicated final words of all stanzas."""
    unique_words = set()
    for stanza in stanzas:
        unique_words.update(stanza.words)
    return sorted(unique_words)
def cli(ctx, stage):
    """Listen to push requests for src and pull requests from target
    (experimental)."""
    if not ctx.bubble:
        ctx.say_yellow('There is no bubble present, will not listen')
        raise click.Abort()
    SRC = None
    if stage in STAGES:
        try:
            SRC = ctx.cfg.CFG[stage].SOURCE
        except KeyError:
            pass
    if not SRC:
        ctx.say_red('There is no SOURCE in stage:' + stage)
        ctx.say_yellow('please check configuration in ' + ctx.home + '/config/config.yaml')
        raise click.Abort()
    if 'SERVER' not in SRC:
        ctx.say_red('There is no SOURCE.SERVER in stage:' + stage)
        raise click.Abort()
    src_server = get_server(SRC.SERVER, ctx.home)

    # connect storage/pipeline to target via transform
    # write state listening on port etc into
    def message_handler(**m):
        # arrow.now must be *called*; printing the bare function object
        # produced a useless repr instead of a timestamp.
        print(str(arrow.now()), str(m))
        return True, 'handled'
    try:
        # TODO: bg &
        # src_listening = src_server.listen(cfg=SRC,
        src_server.listen(cfg=SRC, push_handler=message_handler, pull_handler=message_handler)
    except Exception as e:
        ctx.say_red('cannot listen from source client bubble.clients.' + SRC.SERVER)
        ctx.say_red(str(e))
        raise click.Abort('cannot listen')
def count(cls, iterable):
    """Return the number of items in *iterable*.

    The iterable is fully consumed. Uses a C-speed ``sum`` over a
    generator instead of a manual ``next()`` loop.
    """
    return sum(1 for _ in iterable)
def _collect_data ( self ) :
"""Returns a list of all the data gathered from the engine
iterable .""" | all_data = [ ]
for line in self . engine . run_engine ( ) :
logging . debug ( "Adding {} to all_data" . format ( line ) )
all_data . append ( line . copy ( ) )
logging . debug ( "all_data is now {}" . format ( all_data ) )
return all_data |
def symmetry_looking(x, param):
    """Boolean feature denoting whether the distribution of x *looks
    symmetric*. This is the case if

    .. math::

        | mean(X) - median(X) | < r * (max(X) - min(X))

    :param x: the time series to calculate the feature of
    :type x: numpy.ndarray
    :param param: contains dictionaries {"r": x} with r the percentage
        of the range to compare with
    :type param: list
    :return: list of ("r_<r>", bool) feature tuples, one per entry in
        ``param``
    """
    values = x if isinstance(x, (np.ndarray, pd.Series)) else np.asarray(x)
    center_offset = np.abs(np.mean(values) - np.median(values))
    value_range = np.max(values) - np.min(values)
    return [("r_{}".format(cfg["r"]), center_offset < cfg["r"] * value_range) for cfg in param]
def path_to_pattern(path, metadata=None):
    """Strip cache-source information from a path.

    Returns None if ``path`` is not a str.

    Parameters
    ----------
    path : str
        Path to data, optionally containing format strings.
    metadata : dict, optional
        Extra arguments to the class; contains any cache information.

    Returns
    -------
    pattern : str
        Pattern-style path stripped of everything to the left of the
        cache regex.
    """
    if not isinstance(path, str):
        return None
    cache_entries = (metadata or {}).get('cache')
    if cache_entries:
        # The urlpath cache entry's regex marks where the real pattern starts.
        regex = next(entry.get('regex') for entry in cache_entries if entry.get('argkey') == 'urlpath')
        return path.split(regex)[-1]
    return path
def done(self, result):
    """Save the dialog geometry before the dialog closes so it can be
    restored later, then delegate to the default QDialog.done()."""
    self._geometry = self.geometry()
    QtWidgets.QDialog.done(self, result)
def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):
    """Convert a ThreatConnect JSON response to a TCEntityArray.

    .. Attention:: This method is subject to frequent changes.

    Args:
        tc_data (dict or list): Data returned from a TC API call.
        value_fields (list): Field names that contain the "value" data.
        resource_type (str): The resource type of the tc_data provided.
        resource_type_parent (str): The parent resource type of the
            tc_data provided.

    Returns:
        list: A list representing a TCEntityArray.
    """
    items = tc_data if isinstance(tc_data, list) else [tc_data]
    entity_array = []
    for item in items:
        entity = {'id': item.get('id'), 'webLink': item.get('webLink')}
        # "value": either the summary or the joined value fields.
        if 'summary' in item:
            values = [item.get('summary')]
        else:
            values = [item.get(field) for field in value_fields if item.get(field) is not None]
        entity['value'] = ' : '.join(values)
        # "type": prefer the explicit type, falling back to resource_type.
        entity['type'] = item.get('type') if item.get('type') is not None else resource_type
        if resource_type_parent == 'Indicator':
            entity['confidence'] = item.get('confidence')
            entity['rating'] = item.get('rating')
            entity['threatAssessConfidence'] = item.get('threatAssessConfidence')
            entity['threatAssessRating'] = item.get('threatAssessRating')
            entity['dateLastModified'] = item.get('lastModified')
        if resource_type_parent in ('Indicator', 'Group'):
            if 'owner' in item:
                entity['ownerName'] = item['owner']['name']
            else:
                entity['ownerName'] = item.get('ownerName')
            entity['dateAdded'] = item.get('dateAdded')
        if resource_type_parent == 'Victim':
            entity['ownerName'] = item.get('org')
        entity_array.append(entity)
    return entity_array
def remove_callback(self, signal_name, callback):
    """:meth:`.WSignalSourceProto.remove_callback` implementation

    Removes ``callback`` from the callbacks registered for
    ``signal_name``.
    """
    try:
        self.__direct_callbacks[signal_name].remove(callback)
    except KeyError:
        # NOTE(review): KeyError fires for an unknown signal_name and,
        # if the per-signal container is a set, also for a missing
        # callback. If the container is a list, a missing callback
        # raises ValueError instead and escapes this handler — confirm
        # the container type.
        raise ValueError('Signal "%s" does not have the specified callback' % signal_name)
def is_training_name(name):
    """**Guess** if this variable is only used in training.

    Only used internally to avoid too many logging. Do not use it.
    """
    # TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
    # TODO or use get_slot_names()
    name = get_op_tensor_name(name)[0]
    # Optimizer slot variables all end with the optimizer's suffix.
    optimizer_suffixes = (
        '/Adam', '/Adam_1',
        '/Momentum',
        '/Adadelta', '/Adadelta_1',
        '/RMSProp', '/RMSProp_1',
        '/Adagrad',
        '/AccumGrad',
    )
    if name.endswith(optimizer_suffixes):
        return True
    # all the moving average summaries
    if name.startswith('EMA/') or '/EMA/' in name:
        return True
    if name.startswith(('AccumGrad', 'apply_gradients')):
        return True
    return False
def _pop_digits(char_list):
    """Pop consecutive digits from the front of the list and return them.

    Pops any and all consecutive digits from the start of the provided
    character list and returns them as a list of string digits.
    Operates on (and possibly alters) the passed list.

    :param list char_list: a list of characters
    :return: a list of string digits
    :rtype: list
    """
    logger.debug('_pop_digits(%s)', char_list)
    popped = []
    # Consume leading digit characters in place.
    while char_list and char_list[0].isdigit():
        popped.append(char_list.pop(0))
    logger.debug('got digits: %s', popped)
    logger.debug('updated char list: %s', char_list)
    return popped
def ensure_dict_key(in_dict, keys, delimiter=DEFAULT_TARGET_DELIM, ordered_dict=False):
    '''Ensure that in_dict contains the series of recursive keys defined
    in keys.

    :param dict in_dict: The dict to work with.
    :param str keys: The delimited string with one or more keys.
    :param str delimiter: The delimiter to use in ``keys``. Defaults to ':'.
    :param bool ordered_dict: Create OrderedDicts if keys are missing.
        Default: create regular dicts.
    '''
    key_parts = keys.split(delimiter) if delimiter in keys else [keys]
    node = in_dict
    for part in key_parts:
        # Replace anything that is missing or not a dict with a fresh mapping.
        if part not in node or not isinstance(node[part], dict):
            node[part] = OrderedDict() if ordered_dict else {}
        node = node[part]
    return in_dict
def file_query_size(self, path, follow_symlinks):
    """Query the size of a regular file in the guest.

    in path of type str
        Path to the file whose size is requested. Guest path style.

    in follow_symlinks of type bool
        If True, symbolic links in the final path component are followed
        to their target and the size of the target is returned. If
        False, symbolic links in the final path component make the call
        fail (a symlink is not a regular file).

    return size of type int
        Queried file size.

    raises :class:`VBoxErrorObjectNotFound`
        File was not found.
    raises :class:`VBoxErrorIprtError`
        Error querying file size.
    """
    # Validate argument types before crossing the COM/XPCOM boundary.
    if not isinstance(path, basestring):
        raise TypeError("path can only be an instance of type basestring")
    if not isinstance(follow_symlinks, bool):
        raise TypeError("follow_symlinks can only be an instance of type bool")
    return self._call("fileQuerySize", in_p=[path, follow_symlinks])
def radio_field(*args, **kwargs):
    '''Build a WTForms radio-button field.

    The created field is tagged with an ``input_type`` attribute so the
    template layer can render it appropriately.
    '''
    # Local renamed so it no longer shadows this function's own name.
    field = wtforms.RadioField(*args, **kwargs)
    field.input_type = 'radio_field'
    return field
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.