signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _from_dict ( cls , _dict ) :
"""Initialize a DialogNodeOutput object from a json dictionary .""" | args = { }
xtra = _dict . copy ( )
if 'generic' in _dict :
args [ 'generic' ] = [ DialogNodeOutputGeneric . _from_dict ( x ) for x in ( _dict . get ( 'generic' ) ) ]
del xtra [ 'generic' ]
if 'modifiers' in _dict :
args [ 'modifiers' ] = DialogNodeOutputModifiers . _from_dict ( _dict . get ( 'modifiers' ) )
del xtra [ 'modifiers' ]
args . update ( xtra )
return cls ( ** args ) |
def create_bulk_device_enrollment(self, enrollment_identities, **kwargs):  # noqa: E501
    """Bulk upload of device enrollment IDs from a `CSV` file.

    The file holds one enrollment ID per line; the first line is an
    unvalidated header, IDs start with ``A-`` and may be quoted, UTF-8
    encoding is expected, and the maximum file size is 10MB.

    Synchronous by default.  For an asynchronous request pass
    ``asynchronous=True``:

    >>> thread = api.create_bulk_device_enrollment(enrollment_identities, asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param file enrollment_identities: The `CSV` file containing the
        enrollment IDs. (required)
    :return: BulkResponse, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.create_bulk_device_enrollment_with_http_info(
            enrollment_identities, **kwargs)  # noqa: E501
    result = self.create_bulk_device_enrollment_with_http_info(
        enrollment_identities, **kwargs)  # noqa: E501
    return result
def sites(c):
    """Build both doc sites with maxed nitpicking."""
    # TODO: super lolzy, but nontrivial in-Python task calling isn't tackled
    # yet, so grab copies of 'our' context updated with the per-collection
    # config data of the docs/www subcollections.
    docs_ctx = Context(config=c.config.clone())
    www_ctx = Context(config=c.config.clone())
    docs_ctx.update(**docs.configuration())
    www_ctx.update(**www.configuration())
    # Must build both normally first so good intersphinx inventory files
    # exist -- circular dependencies ahoy!  Do it quietly to avoid polluting
    # output; only super-serious errors will bubble up.
    # TODO: wants a 'temporarily tweak context settings' contextmanager
    # TODO: also a spinner, because this is confusing every time when the
    # docs aren't already prebuilt.
    for ctx in (docs_ctx, www_ctx):
        ctx["run"].hide = True
    docs["build"](docs_ctx)
    www["build"](www_ctx)
    for ctx in (docs_ctx, www_ctx):
        ctx["run"].hide = False
    # Run the actual builds, with nitpick=True (nitpicks + tracebacks).
    docs["build"](docs_ctx, nitpick=True)
    www["build"](www_ctx, nitpick=True)
def _compute_magnitude_scaling ( self , mag , C ) :
"""Compute magnitude - scaling term ( Page 141 , Eq 1)""" | dmag = mag - 6.
return ( C [ 'B2' ] * dmag ) + ( C [ 'B3' ] * ( dmag ** 2. ) ) |
def _query(self, params, direct=False):
    """Issue an API query and return the wrapped response.

    The api_key is always injected; the format defaults to the client's
    default when the caller didn't specify one.

    :param params: dict
    :return: pybomb.clients.response
    """
    params["api_key"] = self._api_key
    params.setdefault("format", self._default_format)
    raw_response = self._query_api(params, direct)
    self._validate_response(raw_response)
    return Response.from_response_data(raw_response)
def process_line(self, line):
    """Send one line of data to the subprocess and return its reply line.

    Writes *line* plus a newline to the process's stdin, flushes, then
    reads a single resulting line from stdout.

    Parameters
        line : str
            The data sent to process. Must not contain newline characters.

    Returns
        str: The line returned by the Java process.

    Raises
        Exception
            In case EOF is encountered while reading.
        IoError
            In case reading/writing the subprocess stdin/stdout failed.
    """
    assert isinstance(line, str)
    try:
        stdin = self._process.stdin
        stdin.write(as_binary(line))
        stdin.write(as_binary('\n'))
        stdin.flush()
        reply = as_unicode(self._process.stdout.readline())
        if reply == '':
            # Empty read means the process died; surface its stderr.
            stderr = as_unicode(self._process.stderr.read())
            raise Exception('EOF encountered while reading stream. Stderr is {0}.'.format(stderr))
        return reply
    except Exception:
        # Don't leave a wedged subprocess behind on any failure.
        self._process.terminate()
        raise
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok, error_message=None):
    """Respond to a pre-checkout query in the Telegram payments flow.

    See: https://core.telegram.org/bots/api#answerprecheckoutquery

    :param pre_checkout_query_id: identifier of the query being answered
    :param ok: whether the order can proceed
    :param error_message: reason shown to the user -- presumably required
        when ``ok`` is falsy per the API docs; not enforced here (confirm).
    """
    # NOTE: locals() is captured before any other local is created, so the
    # payload consists of exactly this method's parameters (minus self).
    p = _strip(locals())
    return self._api_request('answerPreCheckoutQuery', _rectify(p))
def collection_response(cls, resources, start=None, stop=None):
    """Return a response for the *resources* in the appropriate content type.

    :param resources: resources to be returned in request
    :type resources: list of :class:`sandman.model.Model`
    :rtype: :class:`flask.Response`
    """
    wants_json = _get_acceptable_response_type() == JSON
    if wants_json:
        return _collection_json_response(cls, resources, start, stop)
    return _collection_html_response(resources, start, stop)
def quaternion_inverse(quaternion):
    """Return the multiplicative inverse of a quaternion.

    >>> q0 = random_quaternion()
    >>> q1 = quaternion_inverse(q0)
    >>> numpy.allclose(quaternion_multiply(q0, q1), [1, 0, 0, 0])
    True
    """
    q = numpy.array(quaternion, dtype=numpy.float64, copy=True)
    # Conjugate (negate the vector part), then divide by the squared norm.
    q[1:] = -q[1:]
    return q / numpy.dot(q, q)
def send_to_backends(self, event):
    """Send *event* to every registered backend.

    All ``Exception``s are logged and swallowed so that one broken backend
    cannot prevent delivery to the others.
    """
    # dict.items() iterates identically on Py2 and Py3; the six shim is
    # unnecessary here.
    for name, backend in self.backends.items():
        try:
            backend.send(event)
        except Exception:  # pylint: disable=broad-except
            LOG.exception('Unable to send event to backend: %s', name)
def agg(self, aggregations):
    """Perform multiple aggregations in one optimized pass.

    Parameters
        aggregations : list of str
            Which aggregations to perform.

    Returns
        DataFrame
            DataFrame with the aggregations per column.
    """
    check_type(aggregations, list)
    numeric_df = _drop_str_columns(self)
    if len(numeric_df._data) == 0:
        # Conforming to what pandas does when nothing is left to aggregate.
        raise ValueError('No results')
    new_index = Index(np.array(aggregations, dtype=np.bytes_), np.dtype(np.bytes_))
    new_data = OrderedDict(
        (column.name, _series_agg(column, aggregations, new_index))
        for column in numeric_df._iter()
    )
    return DataFrame(new_data, new_index)
def from_utf8(buf, errors='replace'):
    """Decode a UTF-8 compatible, ASCII string into a unicode object.

    `buf`
        string or unicode string to convert.

    Returns a unicode string.

    *Raises a ``UnicodeDecodeError`` exception if decoding failed and
    `errors` isn't set to 'replace'.
    """
    # Python 2 code path: 'unicode' is the Py2 builtin.
    if isinstance(buf, unicode):  # noqa: F821
        return buf
    return unicode(buf, 'utf-8', errors)  # noqa: F821
def normalize_names(column_names):
    """Translate arbitrary column names to the SQL-normalized names that
    CARTO's Import API would produce.

    Examples:
        * 'Field: 2' -> 'field_2'
        * '2 Items' -> '_2_items'
        * '201moore' -> '_201moore' (and '_201moore_1' on collision)
        * 'SELECT' -> '_select'
        * 'à' -> 'a'
        * over-long names are truncated (deduplicated with '_N' suffixes)

    Args:
        column_names (list): List of column names that will be SQL normalized

    Returns:
        list: List of SQL-normalized column names
    """
    normalized = []
    for raw_name in column_names:
        # Already-normalized names are passed as forbidden so collisions get
        # a numeric suffix.
        normalized_column = Column(raw_name).normalize(forbidden_column_names=normalized)
        normalized.append(normalized_column.name)
    return normalized
def create_wish_list(cls, wish_list, **kwargs):
    """Create a new WishList.

    Synchronous by default; pass ``async=True`` (necessarily via **kwargs,
    since ``async`` is a reserved word in Python 3.7+) to get the request
    thread back instead:

    >>> thread = api.create_wish_list(wish_list, async=True)
    >>> result = thread.get()

    :param async bool
    :param WishList wish_list: Attributes of wishList to create (required)
    :return: WishList, or the request thread when called asynchronously
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._create_wish_list_with_http_info(wish_list, **kwargs)
    data = cls._create_wish_list_with_http_info(wish_list, **kwargs)
    return data
def copyPort(port, targetLNode, reverseDir, topPortName=None):
    """Create an identical port on targetLNode, optionally renaming it."""
    duplicate = _copyPort(port, targetLNode, reverseDir)
    if topPortName is not None:
        duplicate.name = topPortName
    return duplicate
def train(self, X):
    """Train logistic-regression classifier(s) for the multiclass problem
    posed by ``X``.

    X (numpy.ndarray): The input data -- either a 3-D numpy.ndarray or an
        iterable of 2-D arrays, one per input class; every row is one
        example, every column one feature.

    Returns:
        Machine: A trained machine (a single binary classifier for two
        classes, otherwise a one-versus-all multiclass machine).
    """
    trainer = bob.learn.linear.CGLogRegTrainer(**{'lambda': self.regularizer})
    if len(X) == 2:
        # Two classes: a single logistic regression classifier suffices.
        return trainer.train(add_bias(X[0]), add_bias(X[1]))
    # One-versus-all strategy: one machine per class, trained against the
    # stacked data of all other classes.
    machines = []
    for k in range(len(X)):
        negative_idx = list(range(0, k)) + list(range(k + 1, len(X)))
        machines.append(
            trainer.train(add_bias(numpy.vstack(X[negative_idx])), add_bias(X[k]))
        )
    return MultiClassMachine(machines)
def _sigma_inel ( self , Tp ) :
"""Inelastic cross - section for p - p interaction . KATV14 Eq . 1
Parameters
Tp : float
Kinetic energy of proton ( i . e . Ep - m _ p * c * * 2 ) [ GeV ]
Returns
sigma _ inel : float
Inelastic cross - section for p - p interaction [ 1 / cm2 ] .""" | L = np . log ( Tp / self . _Tth )
sigma = 30.7 - 0.96 * L + 0.18 * L ** 2
sigma *= ( 1 - ( self . _Tth / Tp ) ** 1.9 ) ** 3
return sigma * 1e-27 |
def parse_image_response(self, response):
    """Parse a single object from the RETS feed.

    :param response: The response from the RETS server
    :return: Object
    """
    # An XML content type signals an error reply rather than image data;
    # surface the RETS reply code in that case.  Default to '' so a missing
    # Content-Type header is treated as non-XML instead of raising
    # TypeError ('in' on None).
    if 'xml' in response.headers.get('Content-Type', ''):
        xml = xmltodict.parse(response.text)
        self.analyze_reply_code(xml_response_dict=xml)
    obj = self._response_object_from_header(
        obj_head_dict=response.headers,
        content=response.content,
    )
    return obj
def generate(env):
    """Add Builders and construction variables for MIPSPro to an Environment."""
    link.generate(env)
    env['LINK'] = env.Detect(linkers) or 'cc'
    env['SHLINKFLAGS'] = SCons.Util.CLVar('$LINKFLAGS -shared')
    # __RPATH is set to $_RPATH in the platform specification if that
    # platform supports it.
    for key, value in (
        ('RPATHPREFIX', '-rpath '),
        ('RPATHSUFFIX', ''),
        ('_RPATH', '${_concat(RPATHPREFIX, RPATH, RPATHSUFFIX, __env__)}'),
    ):
        env[key] = value
def merge(self, imgs):
    """Merge image channels into one image.

    Parameters
        imgs : `list` of `PIL.Image.Image`

    Returns
        `PIL.Image.Image`

    Raises
        ValueError
            If the image channel list is empty.
    """
    if not imgs:
        raise ValueError('empty channel list')
    # A single channel is returned unchanged; PIL only merges 2+ channels.
    return imgs[0] if len(imgs) == 1 else Image.merge(self.mode, imgs)
def read_version():
    """Read the package version out of ./oi/version.py.

    Returns the quoted value of the first line starting with 'VERSION',
    or None if no such line exists.
    """
    with open('./oi/version.py') as fh:
        version_line = next((ln for ln in fh if ln.startswith('VERSION')), None)
    if version_line is not None:
        return version_line.split('=')[1].strip().strip("'")
def get_filterset(self, request, queryset, view):
    """Build the filterset, validating the requested filters first.

    Sometimes there's no `filterset_class` defined yet the client still
    requests a filter -- they must see an error too, which is why
    `get_filterset_kwargs()` runs even when there's no `filterset_class`.
    """
    # TODO: .base_filters vs. .filters attr (not always present)
    filterset_class = self.get_filterset_class(view, queryset)
    kwargs = self.get_filterset_kwargs(request, queryset, view)
    self._validate_filter(kwargs.pop('filter_keys'), filterset_class)
    return None if filterset_class is None else filterset_class(**kwargs)
def parse_version(version: str) -> tuple:
    """Parse a string formatted X[.Y.Z] version number into a tuple of ints.

    Returns None for an empty/falsy input; missing components default to 0.

    >>> parse_version('10.2.3')
    (10, 2, 3)
    >>> parse_version('12')
    (12, 0, 0)
    """
    if not version:
        return None
    components = [int(part) for part in version.split('.')]
    while len(components) < 3:
        components.append(0)
    return tuple(components)
def train(self, s, path="spelling.txt"):
    """Count the words in *s* and save the counts at *path*.

    The resulting file (one "word count" pair per line, sorted by word)
    can be used to generate a new model for the Spelling() constructor.

    :param s: text corpus to count lowercase words from
    :param path: destination file for the model
    """
    from collections import Counter
    counts = Counter(re.findall("[a-z]+", s.lower()))
    lines = ("%s %s" % (word, n) for word, n in sorted(counts.items()))
    # Context manager guarantees the file is closed even if the write
    # fails (the original leaked the handle on error).
    with open(path, "w") as f:
        f.write("\n".join(lines))
def call_actions(self, service_name, actions, expansions=None, raise_job_errors=True,
                 raise_action_errors=True, timeout=None, **kwargs):
    """Build and send a single job request with one or more actions, blocking
    until the response arrives.

    Returns a list of action responses in the same order as provided, or
    raises if any action response is an error (unless `raise_action_errors`
    is `False`) or the job response is an error (unless `raise_job_errors`
    is `False`).  Performs expansions if the Client is configured with an
    expansion converter.

    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param actions: `ActionRequest` objects and/or dicts convertible to them
    :type actions: iterable[union[ActionRequest, dict]]
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_job_errors: Raise a JobError on job errors (default `True`)
    :type raise_job_errors: bool
    :param raise_action_errors: Raise a CallActionError on action errors
        (default `True`)
    :type raise_action_errors: bool
    :param timeout: Overrides the default transport timeout values; requests
        expire after this many seconds plus a transport-defined buffer, and
        the client will not block longer than this for a response.
    :type timeout: int

    Additional kwargs (switches, correlation_id, continue_on_error, context,
    control_extra) are forwarded to the underlying future.

    :return: The job response
    :rtype: JobResponse
    :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout,
        MessageTooLarge, MessageReceiveError, MessageReceiveTimeout,
        InvalidMessage, JobError, CallActionError
    """
    future = self.call_actions_future(
        service_name,
        actions,
        expansions,
        raise_job_errors,
        raise_action_errors,
        timeout,
        **kwargs
    )
    return future.result()
def parse(self, rec):
    """Retrieve row data from files associated with the ISATabRecord."""
    parsed_studies = []
    for study in rec.studies:
        source_data = self._parse_study(
            study.metadata["Study File Name"],
            ["Source Name", "Sample Name", "Comment[ENA_SAMPLE]"],
        )
        # Only replace the study's nodes when the study file yielded rows.
        if source_data:
            study.nodes = source_data
        parsed_assays = []
        for assay in study.assays:
            assay_rec = ISATabAssayRecord(assay)
            assay_rec.nodes = self._parse_study(
                assay["Study Assay File Name"],
                ["Sample Name", "Extract Name", "Raw Data File",
                 "Derived Data File", "Image File",
                 "Acquisition Parameter Data File",
                 "Free Induction Decay Data File"],
            )
            self._get_process_nodes(assay["Study Assay File Name"], assay_rec)
            parsed_assays.append(assay_rec)
        study.assays = parsed_assays
        # get process nodes
        self._get_process_nodes(study.metadata["Study File Name"], study)
        parsed_studies.append(study)
    rec.studies = parsed_studies
    return rec
def vserver_exists(v_name, v_ip=None, v_port=None, v_type=None, **connection_args):
    '''Checks if a vserver exists, optionally also matching ip, port and type.

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.vserver_exists 'vserverName'
    '''
    vserver = _vserver_get(v_name, **connection_args)
    if vserver is None:
        return False
    if v_ip is not None and vserver.get_ipv46() != v_ip:
        return False
    if v_port is not None and vserver.get_port() != v_port:
        return False
    # Service type comparison is case-insensitive.
    return not (v_type is not None and
                vserver.get_servicetype().upper() != v_type.upper())
def log_last_error(fname, context=None):
    """Append the most recent traceback to the file *fname*.

    :param fname: path of the log file (opened in append mode)
    :param context: optional string giving extra context for the error
    """
    # 'with' guarantees the descriptor is closed even if a write fails
    # (the original leaked the file handle).
    with open(fname, 'a') as fd:
        log_time(fd)
        if context:
            print("Context", file=fd)
            print("-------", file=fd)
            print("", file=fd)
            if PY2:
                print(u' '.join(context).encode('utf-8').strip(), file=fd)
            else:
                print(context, file=fd)
            print("", file=fd)
        print("Traceback", file=fd)
        print("---------", file=fd)
        print("", file=fd)
        traceback.print_exc(file=fd)
        print("", file=fd)
        print("", file=fd)
def _api_all_views(self):
    """Glances API RESTful implementation.

    Return the JSON representation of all the plugins views.
    HTTP/200 if OK, HTTP/400 if plugin is not found, HTTP/404 on other errors.
    """
    response.content_type = 'application/json; charset=utf-8'
    try:
        # Get the JSON value of the stat view
        views_json = json.dumps(self.stats.getAllViewsAsDict())
    except Exception as e:
        abort(404, "Cannot get views (%s)" % (str(e)))
    return views_json
def _from_frame_string(contents):
    """Convert a single-frame XYZ string to a Molecule."""
    lines = contents.split("\n")
    num_sites = int(lines[0])
    species = []
    coords = []
    coord_patt = re.compile(
        r"(\w+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)\s+([0-9\-\+\.eEdD]+)"
    )
    # Line 0 is the site count, line 1 a comment; sites start at index 2.
    for line_idx in range(2, 2 + num_sites):
        match = coord_patt.search(lines[line_idx])
        if match:
            species.append(match.group(1))
            # Old double-precision output writes exponents as 0.0D+00 /
            # 0.00d+01; replace d/D by e so float() can parse them.
            coords.append([
                float(token.lower().replace("d", "e"))
                for token in match.groups()[1:4]
            ])
    return Molecule(species, coords)
def extract_units(string_in):
    """Extract the units from parentheses in a string, e.g. "elevation (meters)".

    :param str string_in:
    :return str: text between the first '(' and the first ')'
    :raises ValueError: if either parenthesis is absent
    """
    opening = string_in.index('(')
    closing = string_in.index(')')
    return string_in[opening + 1:closing]
def fetch(self, minutes=values.unset, start_date=values.unset, end_date=values.unset,
          task_queue_sid=values.unset, task_queue_name=values.unset,
          friendly_name=values.unset, task_channel=values.unset):
    """Fetch a WorkersStatisticsInstance.

    :param unicode minutes: Filter cumulative statistics by up to 'x' minutes in the past.
    :param datetime start_date: Filter cumulative statistics by a start date.
    :param datetime end_date: Filter cumulative statistics by a end date.
    :param unicode task_queue_sid: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode task_queue_name: Filter the real-time and cumulative statistics based on Workers tied to a particular queue
    :param unicode friendly_name: The friendly_name
    :param unicode task_channel: Filter cumulative statistics by TaskChannel.

    :returns: Fetched WorkersStatisticsInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.workers_statistics.WorkersStatisticsInstance
    """
    return self._proxy.fetch(
        minutes=minutes,
        start_date=start_date,
        end_date=end_date,
        task_queue_sid=task_queue_sid,
        task_queue_name=task_queue_name,
        friendly_name=friendly_name,
        task_channel=task_channel,
    )
def fit(self, X):
    """Fit a kernel density estimate to a list of values.

    Args:
        X: 1-d ``np.ndarray`` or ``pd.Series`` or ``list`` of datapoints
            to be estimated from.

    Fits a gaussian_kde model to the datapoints and stores it as a class
    attribute; degenerate (constant) data gets constant-valued methods
    swapped in instead.
    """
    self.constant_value = self._get_constant_value(X)
    if self.constant_value is not None:
        self._replace_constant_methods()
    else:
        self.model = scipy.stats.gaussian_kde(X)
    self.fitted = True
def set_attribute(self, id, value, version=1):
    """Set a custom attribute to a specific value.

    :param id: id of the attribute
    :param value: value of the attribute
    :param version: version of the attribute (default = 1)
    """
    attributes = self._get_attributes(cache=True)
    attributes['attributes_values']['{0}'.format(id)] = value
    endpoint_path = '/{endpoint}/custom-attributes-values/{id}'
    response = self.requester.patch(
        endpoint_path,
        endpoint=self.endpoint,
        id=self.id,
        payload={
            'attributes_values': attributes['attributes_values'],
            'version': version,
        },
    )
    # Keep the cache in sync with the value we just wrote.
    cache_key = self.requester.get_full_url(endpoint_path, endpoint=self.endpoint, id=self.id)
    self.requester.cache.put(cache_key, response)
    return response.json()
def depfilter(class_, filter_name):
    """Register the decorated method at the addressed :class:`~.callbacks.Filter`
    on a class on which the service depends.

    :param class_: A service class which is listed in the
        :attr:`~.Meta.ORDER_AFTER` relationship.
    :type class_: :class:`Service` class or :class:`aioxmpp.stream.StanzaStream`
    :param filter_name: Attribute name of the filter to register at
    :type filter_name: :class:`str`

    The filter at which the decorated method is registered is discovered by
    accessing the attribute with the name `filter_name` on the instance of
    the dependent class `class_`.  If `class_` is
    :class:`aioxmpp.stream.StanzaStream`, the filter is searched for on the
    stream (and no dependency needs to be declared).

    .. versionadded:: 0.9
    """
    handler_spec = _depfilter_spec(class_, filter_name)

    def decorator(func):
        add_handler_spec(func, handler_spec)
        return func

    return decorator
def get_partition_trees(self, p):
    """Return the ML trees associated with a partition *p*.

    Groups with no result contribute None (and log an error).
    """
    trees = []
    for group in p.get_membership():
        try:
            trees.append(self.get_group_result(group)['ml_tree'])
        except ValueError:
            logger.error('No tree found for group {}'.format(group))
            trees.append(None)
    return trees
def __bindings(self):
    """Bind widget events to their handler methods."""
    for widget, event, handler in (
        (self.textctrl, wx.EVT_TEXT, self.OnText),
        (self.fontbutton, wx.EVT_BUTTON, self.OnFont),
    ):
        widget.Bind(event, handler)
    self.Bind(csel.EVT_COLOURSELECT, self.OnColor)
def create_device(device_args):
    """Create an AlarmDecoder from the specified USB device arguments.

    :param device_args: Tuple containing information on the USB device to open.
    :type device_args: Tuple (vid, pid, serialnumber, interface_count, description)
    :return: the opened device with the message handler attached
    """
    decoder = AlarmDecoder(USBDevice.find(device_args))
    decoder.on_message += handle_message
    decoder.open()
    return decoder
def _next_id(self):
    """Return the next available message id as a string.

    Must only be called from the ioloop thread; the assert guards against
    accidental cross-thread use.
    """
    assert get_thread_ident() == self.ioloop_thread_id
    new_id = self._last_msg_id + 1
    self._last_msg_id = new_id
    return str(new_id)
def node_is_noop(node: ast.AST) -> bool:
    """Return True when *node* does nothing: a bare string expression
    (e.g. a docstring) or a ``pass`` statement.
    """
    if isinstance(node, ast.Expr):
        # ast.Str is deprecated since 3.8 and removed in Python 3.12;
        # string literals are ast.Constant nodes with a str value.
        return isinstance(node.value, ast.Constant) and isinstance(node.value.value, str)
    return isinstance(node, ast.Pass)
def post_status(app, user, status, visibility='public', media_ids=None,
                sensitive=False, spoiler_text=None, in_reply_to_id=None):
    """Post a new status.

    https://github.com/tootsuite/documentation/blob/master/Using-the-API/API.md#posting-a-new-status
    """
    # The idempotency key assures the same status is not posted multiple
    # times if the request is retried.
    headers = {"Idempotency-Key": uuid.uuid4().hex}
    payload = {
        'status': status,
        'media_ids[]': media_ids,
        'visibility': visibility,
        'sensitive': str_bool(sensitive),
        'spoiler_text': spoiler_text,
        'in_reply_to_id': in_reply_to_id,
    }
    return http.post(app, user, '/api/v1/statuses', payload, headers=headers).json()
def msg_callback(self, callback):
    """Set the message callback; non-callable values clear it."""
    self._msg_callback = callback if callable(callback) else None
def run(self):
    """Run the main loop.  Returns the exit code."""
    self.exit_code = 1
    self.mainloop = GLib.MainLoop()
    try:
        startup = ensure_future(self._start_async_tasks())
        # Exit code is set by the async startup task when it completes.
        startup.callbacks.append(self.set_exit_code)
        self.mainloop.run()
    except KeyboardInterrupt:
        return 1
    return self.exit_code
def _append_comment ( ret , comment ) :
'''append ` ` comment ` ` to ` ` ret [ ' comment ' ] ` `''' | if ret [ 'comment' ] :
ret [ 'comment' ] = ret [ 'comment' ] . rstrip ( ) + '\n' + comment
else :
ret [ 'comment' ] = comment
return ret |
def sma(self, np_array, n, array=False):
    """Simple moving average.

    :param np_array: input series
    :param n: window length; values below 2 return the input unchanged
    :param array: if True return the whole series, otherwise the last value
    """
    result = np_array if n < 2 else talib.SMA(np_array, n)
    return result if array else result[-1]
def ks_synth(freq):
    """Synthesize *freq* into a Stream using a Karplus-Strong based model."""
    harmonics = sum(lz.sinusoid(multiple * freq) for multiple in [1, 3, 9])
    ks_memory = (harmonics + lz.white_noise() + lz.Stream(-1, 1)) / 5
    return lz.karplus_strong(freq, memory=ks_memory)
def transform(self, X):
    """Encode categorical columns into a sparse one-hot matrix.

    Args:
        X (pandas.DataFrame): categorical columns to encode

    Returns:
        X_new (scipy.sparse.coo_matrix): sparse matrix encoding categorical
            variables into dummy variables (None if no column produced output)
    """
    X_new = None
    for i, col in enumerate(X.columns):
        X_col = self._transform_col(X[col], i)
        if X_col is not None:
            # Track the accumulator explicitly: the original initialized
            # X_new only when column 0 produced output, raising NameError
            # when the first column was skipped but a later one wasn't.
            if X_new is None:
                X_new = X_col
            else:
                X_new = sparse.hstack((X_new, X_col))
            logger.debug('{} --> {} features'.format(col, self.label_encoder.label_maxes[i]))
    return X_new
def _init_notes(self):
    """Set up the UserNotes page with the initial JSON schema."""
    moderator_names = [mod.name for mod in self.subreddit.moderator()]
    self.cached_json = {
        'ver': self.schema,
        'users': {},
        'constants': {
            'users': moderator_names,
            'warnings': Note.warnings,
        },
    }
    self.set_json('Initializing JSON via puni', True)
def _load_fits ( self , h5file ) :
"""Loads fits from h5file and returns a dictionary of fits .""" | fits = { }
for key in [ 'mf' ] :
fits [ key ] = self . _load_scalar_fit ( fit_key = key , h5file = h5file )
for key in [ 'chif' , 'vf' ] :
fits [ key ] = self . _load_vector_fit ( key , h5file )
return fits |
def extract_annotation(data):
    """Extract names and values of rows and columns.

    Parameter:
        data : DataFrame | Panel

    Returns:
        col_name, col_values, row_name, row_values
    """
    col_name = col_values = row_name = row_values = None
    # Panel-style objects expose minor/major axes, DataFrames expose
    # columns/index; the DataFrame checks deliberately run last so they win
    # when both attribute sets exist.
    if hasattr(data, 'minor_axis'):
        col_values = data.minor_axis
        if hasattr(data.minor_axis, 'name'):
            col_name = data.minor_axis.name
    if hasattr(data, 'columns'):
        col_values = data.columns
        if hasattr(data.columns, 'name'):
            col_name = data.columns.name
    if hasattr(data, 'major_axis'):
        row_values = data.major_axis
        if hasattr(data.major_axis, 'name'):
            row_name = data.major_axis.name
    if hasattr(data, 'index'):
        row_values = data.index
        if hasattr(data.index, 'name'):
            row_name = data.index.name
    return col_name, col_values, row_name, row_values
def _evaluate(dataset, predictions):
    '''Compute exact-match and F1 percentages of *predictions* over *dataset*.'''
    f1_sum = exact_sum = question_count = 0
    unanswered = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa_pair in paragraph['qas']:
                question_count += 1
                if qa_pair['id'] not in predictions:
                    unanswered += 1
                    continue
                ground_truths = [answer['text'] for answer in qa_pair['answers']]
                prediction = predictions[qa_pair['id']]
                exact_sum += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
                f1_sum += metric_max_over_ground_truths(f1_score, prediction, ground_truths)
    # Raw counts are printed before normalization to percentages.
    print('total', question_count, 'exact_match', exact_sum, 'unanswer_question ', unanswered)
    return {
        'exact_match': 100.0 * exact_sum / question_count,
        'f1': 100.0 * f1_sum / question_count,
    }
def _process_call(self, req, resource):
    """Dispatch port/ip_address requests: forward them down the WSGI chain
    (to other filters and Neutron) and mirror the result to Nova via the
    admin virtual-interfaces callback.

    NOTE(review): only PUT/POST/DELETE on "ports" set ``action``/``resp``;
    a GET on "ports", or resource == "ip_addresses", reaches code that uses
    these names unbound (NameError) — presumably unreachable with current
    routing, but confirm against the callers.
    """
    if resource == "ports":
        if req.method.upper() in ('PUT', 'POST'):
            # Pass the request back to be processed by other filters
            # and Neutron first
            resp = req.get_response(self.app)
            if resp.status_code not in (200, 204):
                return resp
            resp_body = resp.json
            # Variables for Nova Call, obtained from Neutron response
            action = "create"
            address = resp_body['port']['mac_address']
            fixed_ips = resp_body['port']['fixed_ips']
            instance_id = resp_body['port']['instance_id']
            network_id = resp_body['port']['network_id']
            port_id = resp_body['port']['id']
            tenant_id = resp_body['port']['tenant_id']
        elif req.method.upper() == "DELETE":
            action = "delete"
            # The port id is the path segment immediately after "ports".
            port_id = req.path.split("/")
            port_id = port_id[port_id.index("ports") + 1]
            # DELETEs do not have all the port info that we need, so a
            # call to Neutron must be made first.
            neutron_conn = NeutronConn(log=self.log, port=self.neutron_port, url=self.neutron_url, verify_ssl=self.neutron_verify_ssl)
            status, neutron_resp = neutron_conn.ports(port_id=port_id)
            if isinstance(neutron_resp, Exception):
                return neutron_resp
            elif status not in (200, 204):
                # Surface the Neutron lookup failure as a 500 with details.
                resp = Response()
                resp.status = 500
                new_body = {"neutron_callback": {"port_id": port_id, "status": "error", "error": neutron_resp}}
                resp.body = json.dumps(new_body)
                return resp
            # Now that we have the port info, we can make the variables
            # for the Nova Call
            address = neutron_resp['port']['mac_address']
            fixed_ips = neutron_resp['port']['fixed_ips']
            instance_id = neutron_resp['port']['instance_id']
            network_id = neutron_resp['port']['network_id']
            tenant_id = neutron_resp['port']['tenant_id']
            # Port info saved, now send the request back to processed by
            # other filters and Neutron
            resp = req.get_response(self.app)
            if resp.status_code not in (200, 204):
                return resp
            else:
                new_body = resp.json
                new_body['neutron_callback'] = {"port_id": port_id, "status": "success"}
                resp.body = json.dumps(new_body)
        # Mirror the change to Nova regardless of create/delete.
        nova_conn = NovaConn(log=self.log, url=self.nova_url, verify_ssl=self.nova_verify_ssl)
        status, nova_resp = nova_conn.admin_virtual_interfaces(action=action, address=address, fixed_ips=fixed_ips, network_id=network_id, port_id=port_id, tenant_id=tenant_id, instance_id=instance_id)
        if isinstance(nova_resp, Exception):
            return nova_resp
        elif status not in (200, 204):
            # We'll likely want to provide the customer with a call here
            # such as virtual-interface-delete/virtual-interface-update
            resp.status = 500
            new_body = resp.json
            new_body['nova_callback'] = {"instance_id": instance_id, "status": "error", "error": nova_resp}
            resp.body = json.dumps(new_body)
        else:
            new_body = resp.json
            new_body['nova_callback'] = {"instance_id": instance_id, "status": "success"}
            resp.body = json.dumps(new_body)
        return resp
    elif resource == "ip_addresses":
        pass
        # Insert logic to call Nova for ip_addresses changes here
    return resp
def append_flags_into_file(self, filename):
    """Appends all flags assignments from this FlagInfo object to a file.

    Output will be in the format of a flagfile.
    NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
    from https://github.com/gflags/gflags.

    Args:
        filename: str, name of the file.
    """
    # Append mode so repeated calls accumulate assignments.
    with open(filename, 'a') as flagfile:
        flagfile.write(self.flags_into_string())
def add_host_comment(self, host, author, comment):
    """Add a host comment

    Format of the line that triggers function call::
        ADD_HOST_COMMENT;<host_name>;<persistent:obsolete>;<author>;<comment>

    :param host: host to add the comment
    :type host: alignak.objects.host.Host
    :param author: author name
    :type author: str
    :param comment: text comment
    :type comment: str
    :return: None
    """
    comment_data = {
        'author': author,
        'comment': comment,
        'comment_type': 1,
        'entry_type': 1,
        'source': 1,
        'expires': False,
        'ref': host.uuid,
    }
    new_comment = Comment(comment_data)
    host.add_comment(new_comment)
    self.send_an_element(host.get_update_status_brok())
    try:
        # Bytes input: decode before logging.
        brok = make_monitoring_log('info', "HOST COMMENT: %s;%s;%s" % (host.get_name(), str(author, 'utf-8'), str(comment, 'utf-8')))
    except TypeError:
        # Already text: log as-is.
        brok = make_monitoring_log('info', "HOST COMMENT: %s;%s;%s" % (host.get_name(), author, comment))
    self.send_an_element(brok)
    self.send_an_element(new_comment.get_comment_brok(self.hosts[host].get_name()))
def build(self, **kwargs):
    """Build an image and return it. Similar to the ``docker build``
    command. Either ``path`` or ``fileobj`` must be set.

    If you have a tar file for the Docker build context (including a
    Dockerfile) already, pass a readable file-like object to ``fileobj``
    and also pass ``custom_context=True``. If the stream is compressed
    also, set ``encoding`` to the correct value (e.g ``gzip``).

    For the raw output of the build, use the low-level
    :py:meth:`~docker.api.build.BuildApiMixin.build` instead.

    Args:
        path (str): Path to the directory containing the Dockerfile
        fileobj: A file object (or file-like object) to use as the Dockerfile
        tag (str): A tag to add to the final image
        quiet (bool): Whether to return the status
        nocache (bool): Don't use the cache when set to ``True``
        rm (bool): Remove intermediate containers (default ``False`` for
            backward compatibility)
        timeout (int): HTTP timeout
        custom_context (bool): Optional if using ``fileobj``
        encoding (str): The encoding for a stream, e.g. ``gzip``
        pull (bool): Downloads any updates to the FROM image in Dockerfiles
        forcerm (bool): Always remove intermediate containers
        dockerfile (str): path within the build context to the Dockerfile
        buildargs (dict): A dictionary of build arguments
        container_limits (dict): Limits applied to each build container
            (memory, memswap, cpushares, cpusetcpus)
        shmsize (int): Size of `/dev/shm` in bytes (must be > 0)
        labels (dict): A dictionary of labels to set on the image
        cache_from (list): A list of images used for build cache resolution
        target (str): Name of the build-stage to build in a multi-stage
            Dockerfile
        network_mode (str): networking mode for the run commands during build
        squash (bool): Squash the resulting images layers into a single layer
        extra_hosts (dict): Extra hosts to add to /etc/hosts in building
            containers, as a mapping of hostname to IP address
        platform (str): Platform in the format ``os[/arch[/variant]]``
        isolation (str): Isolation technology used during build
        use_config_proxy (bool): If ``True``, propagate any proxy settings
            from the docker client config file into the build containers

    Returns:
        (tuple): The built :py:class:`Image` and a generator of the
        build logs as JSON-decoded objects.

    Raises:
        :py:class:`docker.errors.BuildError`: on build errors.
        :py:class:`docker.errors.APIError`: on other server errors.
        ``TypeError``: if neither ``path`` nor ``fileobj`` is specified.
    """
    resp = self.client.api.build(**kwargs)
    if isinstance(resp, six.string_types):
        # Quiet mode: the API returned the image id directly.
        return self.get(resp)
    image_id = None
    last_event = None
    # tee so the caller can still consume the full log stream.
    result_stream, internal_stream = itertools.tee(json_stream(resp))
    for chunk in internal_stream:
        if 'error' in chunk:
            raise BuildError(chunk['error'], result_stream)
        if 'stream' in chunk:
            match = re.search(r'(^Successfully built |sha256:)([0-9a-f]+)$', chunk['stream'])
            if match:
                image_id = match.group(2)
        last_event = chunk
    if image_id:
        return (self.get(image_id), result_stream)
    raise BuildError(last_event or 'Unknown', result_stream)
def target(self):
    """Return the target of the BFD file being processed.

    Raises BfdException when the underlying BFD pointer is not set.
    """
    if not self._ptr:
        raise BfdException("BFD not initialized")
    return _bfd.get_bfd_attribute(self._ptr, BfdAttributes.TARGET)
def _get_min_distance_to_sub_trench(lons, lats):
    """Compute and return minimum distance between subduction trench
    and points specified by 'lon' and 'lat'.

    The trench is modelled as a vertical
    :class:`openquake.hazardlib.geo.SimpleFaultSurface` spanning 0 to 10 km
    depth; the 10 km value is arbitrary since Rx depends only on the top
    edge.  Returns the absolute value of
    :meth:`openquake.hazardlib.geo.base.BaseSurface.get_rx_distance`.
    """
    trench_surface = _construct_surface(SUB_TRENCH_LONS, SUB_TRENCH_LATS, 0., 10.)
    target_mesh = Mesh(lons, lats, None)
    return np.abs(trench_surface.get_rx_distance(target_mesh))
def marvcli_cleanup(ctx, discarded, unused_tags):
    """Cleanup unused tags and discarded datasets."""
    # With neither flag given there is nothing to do: show help and bail out.
    if not (discarded or unused_tags):
        click.echo(ctx.get_help())
        ctx.exit(1)
    site = create_app().site
    if discarded:
        site.cleanup_discarded()
    if unused_tags:
        site.cleanup_tags()
        site.cleanup_relations()
def process_json_file(file_path, pmid=None, extra_annotations=None, add_grounding=True):
    """Extracts statements from the given ISI output file.

    Parameters
    ----------
    file_path : str
        The ISI output file from which to extract statements
    pmid : int
        The PMID of the document being preprocessed, or None if not specified
    extra_annotations : dict
        Extra annotations to be added to each statement from this document
        (can be the empty dictionary)
    add_grounding : Optional[bool]
        If True the extracted Statements' grounding is mapped
    """
    logger.info('Extracting from %s' % file_path)
    with open(file_path, 'rb') as fh:
        json_content = json.load(fh)
    processor = IsiProcessor(json_content, pmid, extra_annotations)
    processor.get_statements()
    if add_grounding:
        processor.add_grounding()
    return processor
def tofits(outfilename, pixelarray, hdr=None, verbose=True):
    """Takes a 2D numpy array and write it into a FITS file.

    If you specify a header (pyfits format, as returned by fromfits()) it
    will be used for the image.  Boolean arrays are converted to 8-bit
    integers.  An existing file at *outfilename* is overwritten.
    """
    # Fixes: Python-2 print statements -> print(); `hdr == None` -> `is None`;
    # deprecated `np.cast` -> ndarray.astype.
    pixelarrayshape = pixelarray.shape
    if verbose:
        print("FITS export shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]))
    if pixelarray.dtype.name == "bool":
        # FITS has no boolean image type; store as uint8.
        pixelarray = pixelarray.astype("uint8")
    if os.path.isfile(outfilename):
        os.remove(outfilename)
    if hdr is None:
        # then a minimal header will be created
        hdu = pyfits.PrimaryHDU(pixelarray.transpose())
    else:
        hdu = pyfits.PrimaryHDU(pixelarray.transpose(), hdr)
    hdu.writeto(outfilename)
    if verbose:
        print("Wrote %s" % outfilename)
def _get_all_lengths ( self ) :
"""For every utterance , get the length of the data in every container . Return a list of tuples .""" | utt_lengths = { }
for utt_idx in self . utt_ids :
per_container = [ c . _file [ utt_idx ] . shape [ 0 ] for c in self . containers ]
utt_lengths [ utt_idx ] = tuple ( per_container )
return utt_lengths |
def com_google_fonts_check_metadata_valid_filename_values(font, family_metadata):
    """METADATA.pb font.filename field contains font name in right format?"""
    expected = os.path.basename(font)
    # PASS as soon as any metadata entry matches the on-disk filename.
    matched = any(font_metadata.filename == expected
                  for font_metadata in family_metadata.fonts)
    if matched:
        yield PASS, ("METADATA.pb filename field contains" " font name in right format.")
    else:
        yield FAIL, ("None of the METADATA.pb filename fields match" f" correct font name format (\"{expected}\").")
def ConfigureRequest(self, upload_config, http_request, url_builder):
    """Configure the request and url for this upload.

    Validates size and MIME type against *upload_config*, picks the upload
    strategy, and rewrites *http_request* / *url_builder* accordingly.

    Raises:
        exceptions.InvalidUserInputError: if the upload exceeds the
            configured max size or its MIME type is not accepted.
    """
    # Validate total_size vs. max_size
    if (self.total_size and upload_config.max_size and self.total_size > upload_config.max_size):
        raise exceptions.InvalidUserInputError('Upload too big: %s larger than max size %s' % (self.total_size, upload_config.max_size))
    # Validate mime type
    if not util.AcceptableMimeType(upload_config.accept, self.mime_type):
        raise exceptions.InvalidUserInputError('MIME type %s does not match any accepted MIME ranges %s' % (self.mime_type, upload_config.accept))
    self.__SetDefaultUploadStrategy(upload_config, http_request)
    if self.strategy == SIMPLE_UPLOAD:
        url_builder.relative_path = upload_config.simple_path
        # A body means metadata accompanies the media -> multipart upload.
        if http_request.body:
            url_builder.query_params['uploadType'] = 'multipart'
            self.__ConfigureMultipartRequest(http_request)
        else:
            url_builder.query_params['uploadType'] = 'media'
            self.__ConfigureMediaRequest(http_request)
        # Once the entire body is written, compress the body if configured
        # to. Both multipart and media request uploads will read the
        # entire stream into memory, which means full compression is also
        # safe to perform. Because the strategy is set to SIMPLE_UPLOAD,
        # StreamInChunks throws an exception, meaning double compression
        # cannot happen.
        if self.__gzip_encoded:
            http_request.headers['Content-Encoding'] = 'gzip'
            # Turn the body into a stream so that we can compress it, then
            # read the compressed bytes. In the event of a retry (e.g. if
            # our access token has expired), we need to be able to re-read
            # the body, which we can't do with a stream. So, we consume the
            # bytes from the stream now and store them in a re-readable
            # bytes container.
            http_request.body = (compression.CompressStream(six.BytesIO(http_request.body))[0].read())
    else:
        url_builder.relative_path = upload_config.resumable_path
        url_builder.query_params['uploadType'] = 'resumable'
        self.__ConfigureResumableRequest(http_request)
def make_trace_api(client):
    """Create an instance of the gapic Trace API.

    Args:
        client (~google.cloud.trace.client.Client): The client that holds
            configuration details.

    Returns:
        A :class:`~google.cloud.trace._gapic._TraceAPI` instance with the
        proper configurations.
    """
    gapic_client = trace_service_client.TraceServiceClient(
        credentials=client._credentials,
        client_info=_CLIENT_INFO,
    )
    return _TraceAPI(gapic_client, client)
def parse_away_face_offs(self):
    """Parse only the away faceoffs.

    :returns: ``self`` on success, ``None`` otherwise
    """
    # Populate the per-team documents before reading the visitor one.
    self.__set_team_docs()
    self.face_offs['away'] = FaceOffRep.__read_team_doc(self.__vis_doc)
    return self
def add_root_objective_bank(self, alias=None, objective_bank_id=None):
    """Adds a root objective bank.

    arg:    objective_bank_id (osid.id.Id): the ``Id`` of an objective bank
    raise:  AlreadyExists - ``objective_bank_id`` is already in hierarchy
    raise:  NotFound - ``objective_bank_id`` not found
    raise:  NullArgument - ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    roots_url = self._urls.roots(alias=alias)
    # Fetch the current roots, append the new id and push the list back.
    root_ids = self._get_request(roots_url)['ids']
    root_ids.append(str(objective_bank_id))
    return self._put_request(roots_url, {'ids': root_ids})
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    Reads *size* bytes, or all remaining data when *size* is None.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    # Delegate to the underlying VSS store object.
    return self._vshadow_store.read(size)
def location(self):
    """Returns the geolocation as a lat/lng pair, or None when either
    coordinate is missing or falsy (e.g. 0 or empty)."""
    try:
        metadata = self["metadata"]
        lat = metadata["latitude"]
        lng = metadata["longitude"]
    except KeyError:
        return None
    return (lat, lng) if lat and lng else None
def shutdown_login_server(self, ):
    """Shutdown the login server and thread

    :returns: None
    :rtype: None
    :raises: None
    """
    log.debug('Shutting down the login server thread.')
    # Stop serving, release the socket, then wait for the thread to exit.
    server = self.login_server
    server.shutdown()
    server.server_close()
    self.login_thread.join()
def ToJson(self):
    """Convert object members to a dictionary that can be parsed as JSON.

    Returns:
        dict:
    """
    name = 'Contract'
    try:
        name = self.Name.decode('utf-8')
    except Exception:
        # Name may not decode cleanly; keep the generic fallback label.
        pass
    json_out = {'version': self.StateVersion}
    json_out.update(self.Code.ToJson())
    json_out.update({
        'name': name,
        'code_version': self.CodeVersion.decode('utf-8'),
        'author': self.Author.decode('utf-8'),
        'email': self.Email.decode('utf-8'),
        'description': self.Description.decode('utf-8'),
        'properties': {
            'storage': self.HasStorage,
            'dynamic_invoke': self.HasDynamicInvoke,
            'payable': self.Payable,
        },
    })
    if self._nep_token:
        json_out['token'] = self._nep_token.ToJson()
    return json_out
def watch_for_events():
    """Wait for inotify events on /tmp and print them to stdout forever."""
    notify_fd = inotify.init()
    try:
        inotify.add_watch(notify_fd, '/tmp', inotify.IN_CLOSE_WRITE)
        while True:
            for event in inotify.get_events(notify_fd):
                print("event:", event.name, event.get_mask_description())
    finally:
        # Always release the inotify file descriptor.
        os.close(notify_fd)
def smartDumpDictHdf5(RV, o):
    """Dump a dictionary where each page is a list or an array or still a
    dictionary (in this case, it recurses into a new HDF5 group)."""
    for key, value in RV.items():
        if type(value) is dict:
            # Nested dict -> nested HDF5 group.
            subgroup = o.create_group(key)
            smartDumpDictHdf5(value, subgroup)
        else:
            o.create_dataset(name=key, data=SP.array(value), chunks=True, compression='gzip')
def save(self, to_save, manipulate=True, check_keys=True, **kwargs):
    """Save a document in this collection.

    **DEPRECATED** - Use :meth:`insert_one` or :meth:`replace_one` instead.

    .. versionchanged:: 3.0
       Removed the `safe` parameter. Pass ``w=0`` for unacknowledged write
       operations.
    """
    warnings.warn("save is deprecated. Use insert_one or replace_one " "instead", DeprecationWarning, stacklevel=2)
    common.validate_is_document_type("to_save", to_save)
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    # Any remaining kwargs are write-concern options.
    write_concern = WriteConcern(**kwargs) if kwargs else None
    with self._socket_for_writes() as sock_info:
        has_id = isinstance(to_save, RawBSONDocument) or "_id" in to_save
        if not has_id:
            # No _id: this is an insert.
            return self._insert(sock_info, to_save, True, check_keys, manipulate, write_concern)
        # _id present: upsert-style replace, then return the id.
        self._update(sock_info, {"_id": to_save["_id"]}, to_save, True, check_keys, False, manipulate, write_concern, collation=collation)
        return to_save.get("_id")
def find_by_id(self, project, params=None, **options):
    """Returns the complete project record for a single project.

    Parameters
    ----------
    project : {Id} The project to get.
    [params] : {Object} Parameters for the request
    """
    # Fix: the original used a mutable default (`params={}`), which is
    # shared across calls and can leak state if a callee mutates it.
    if params is None:
        params = {}
    path = "/projects/%s" % (project)
    return self.client.get(path, params, **options)
def load_extension(self, name):
    """Loads an extension.

    An extension is a python module that contains commands, cogs, or
    listeners.  It must define a global ``setup(bot)`` entry point that is
    called when the extension is loaded.

    Parameters
    ----------
    name: :class:`str`
        The extension name to load, dot separated like regular Python
        imports (e.g. ``foo.test`` for ``foo/test.py``).

    Raises
    ------
    ExtensionNotFound
        The extension could not be imported.
    ExtensionAlreadyLoaded
        The extension is already loaded.
    NoEntryPointError
        The extension does not have a setup function.
    ExtensionFailed
        The extension setup function had an execution error.
    """
    if name in self.__extensions:
        raise errors.ExtensionAlreadyLoaded(name)
    try:
        module = importlib.import_module(name)
    except ImportError as exc:
        raise errors.ExtensionNotFound(name, exc) from exc
    # Only reached when the import succeeded.
    self._load_from_module_spec(module, name)
def save(self, target, ensure_ascii=True):
    """Save the current schema descriptor to *target* as JSON.

    https://github.com/frictionlessdata/tableschema-py#schema
    """
    # Python 2 needs binary mode and no explicit encoding for io.open.
    if six.PY2:
        mode, encoding = 'wb', None
    else:
        mode, encoding = 'w', 'utf-8'
    helpers.ensure_dir(target)
    with io.open(target, mode=mode, encoding=encoding) as file:
        json.dump(self.__current_descriptor, file, indent=4, ensure_ascii=ensure_ascii)
def user_patterns(self):
    """:class:`~zhmcclient.UserPatternManager`: Access to the
    :term:`User Patterns <User Pattern>` in this Console.
    """
    # Lazily create the manager on first access and cache it.
    if not self._user_patterns:
        self._user_patterns = UserPatternManager(self)
    return self._user_patterns
def get(self, index):
    """Get the element by index.  If index is out of bounds for the internal
    list, None is returned.  Indexes cannot be negative.

    :param int index: retrieve element by positive index in list
    :rtype: SubElement or None
    """
    # Implicitly returns None for an empty container or out-of-range index.
    if self and index < len(self):
        return self._result_cache[index]
def wait_for_service(self, interval=1):
    """Block until the online model leaves a transitional state and is
    ready for service.

    :param interval: check interval in seconds
    :raises OnlineModelError: on deployment failure or unexpected status
    """
    # Poll while the model is still being deployed or updated.
    while self.status in (OnlineModel.Status.DEPLOYING, OnlineModel.Status.UPDATING):
        time.sleep(interval)
    if self.status == OnlineModel.Status.DEPLOY_FAILED:
        raise OnlineModelError(self.last_fail_msg, self)
    if self.status != OnlineModel.Status.SERVING:
        raise OnlineModelError('Unexpected status occurs: %s' % self.status.value, self)
def hungarian(A, B):
    """Hungarian reordering.

    Assume A and B are coordinates for atoms of SAME type only.  Returns the
    indices of B that minimize the total Euclidean assignment cost to A.
    """
    distances = cdist(A, B, 'euclidean')
    # Perform Hungarian analysis on distance matrix between atoms of 1st
    # structure and trial structure.  The row indices are returned in
    # sorted order, so only the column permutation is needed.
    _, indices_b = linear_sum_assignment(distances)
    return indices_b
def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
    """Scale all the datasets in the scene of the active renderer.

    Scaling is performed independently on the X, Y and Z axis.  A scale of
    zero is illegal and will be replaced with one.

    Parameters
    ----------
    xscale : float, optional
        Scaling of the x axis.  Must be greater than zero.
    yscale : float, optional
        Scaling of the y axis.  Must be greater than zero.
    zscale : float, optional
        Scaling of the z axis.  Must be greater than zero.
    reset_camera : bool, optional
        Resets camera so all actors can be seen.
    """
    # Thin delegation to the active renderer.
    self.renderer.set_scale(xscale, yscale, zscale, reset_camera)
def default_update_stack(self, fqn, template, old_parameters, parameters, tags, stack_policy=None, **kwargs):
    """Update a Cloudformation stack in default mode.

    Args:
        fqn (str): The fully qualified name of the Cloudformation stack.
        template (:class:`stacker.providers.base.Template`): A Template
            object to use when updating the stack.
        old_parameters (list): Parameter dicts on the existing stack.
        parameters (list): Parameter dicts to apply to the stack.
        tags (list): Tag dicts to apply to the stack.
        stack_policy (:class:`stacker.providers.base.Template`): A template
            object representing a stack policy.
    """
    logger.debug("Using default provider mode for %s.", fqn)
    update_args = generate_cloudformation_args(fqn, parameters, tags, template, service_role=self.service_role, stack_policy=stack_policy, )
    try:
        self.cloudformation.update_stack(**update_args)
    except botocore.exceptions.ClientError as e:
        # A no-op update is surfaced as a dedicated stacker exception.
        if "No updates are to be performed." in str(e):
            logger.debug("Stack %s did not change, not updating.", fqn, )
            raise exceptions.StackDidNotChange
        # Template too large for inline upload: retry via S3.
        if e.response['Error']['Message'] == ('TemplateURL must ' 'reference a valid ' 'S3 object to which ' 'you have access.'):
            s3_fallback(fqn, template, parameters, tags, self.cloudformation.update_stack, self.service_role)
        else:
            raise
def scale_sfs_folded(s, n):
    """Scale a folded site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes//2,)
        Folded site frequency spectrum.
    n : int
        Number of chromosomes called.

    Returns
    -------
    sfs_folded_scaled : ndarray, int, shape (n_chromosomes//2,)
        Scaled folded site frequency spectrum.
    """
    # Scale each class i by i * (n - i) / n.
    allele_counts = np.arange(s.shape[0])
    return s * allele_counts * (n - allele_counts) / n
def get_objgrpwr(self, goea_results):
    """Return a GrpWr object for writing grouped GOEA results."""
    return GrpWr(self.get_sortobj(goea_results), self.pval_fld, ver_list=self.ver_list)
def name(self):
    """Get the user's name.

    :return: user name ('匿名用户' i.e. "anonymous user" when no url is set)
    :rtype: str
    """
    if self.url is None:
        return '匿名用户'
    if self.soup is not None:
        # Full profile page: name lives in the title section.
        return self.soup.find('div', class_='title-section').span.text
    # Otherwise fall back to the hover card.
    assert self.card is not None
    return self.card.find('span', class_='name').text
def compose_from_srts(srts, search, searchtype):
    """Takes a list of subtitle (srt) filenames, search term and search type
    and, returns a list of timestamps for composing a supercut.

    Each returned entry is a dict with keys 'file', 'time', 'start', 'end'
    and 'line'.

    NOTE(review): ``foundSearchTerm`` is initialized once for all files, so
    the "not found" message is suppressed for later files once any earlier
    file matched — presumably intentional, but confirm.
    """
    composition = []
    foundSearchTerm = False
    # Iterate over each subtitles file.
    for srt in srts:
        print(srt)
        lines = clean_srt(srt)
        videofile = ""
        foundVideoFile = False
        print("[+] Searching for video file corresponding to '" + srt + "'.")
        # Try every supported extension; no break, so the last match wins.
        for ext in usable_extensions:
            tempVideoFile = srt.replace('.srt', '.' + ext)
            if os.path.isfile(tempVideoFile):
                videofile = tempVideoFile
                foundVideoFile = True
                print("[+] Found '" + tempVideoFile + "'.")
        # If a correspndong video file was found for this subtitles file...
        if foundVideoFile:
            # Check that the subtitles file contains subtitles.
            if lines:
                # Iterate over each line in the current subtitles file.
                for timespan in lines.keys():
                    line = lines[timespan].strip()
                    # If this line contains the search term
                    if search_line(line, search, searchtype):
                        foundSearchTerm = True
                        # Extract the timespan for this subtitle.
                        start, end = convert_timespan(timespan)
                        # Record this occurance of the search term.
                        composition.append({'file': videofile, 'time': timespan, 'start': start, 'end': end, 'line': line})
                # If the search was unsuccessful.
                if foundSearchTerm is False:
                    print("[!] Search term '" + search + "'" + " was not found is subtitle file '" + srt + "'.")
            # If no subtitles were found in the current file.
            else:
                print("[!] Subtitle file '" + srt + "' is empty.")
        # If no video file was found...
        else:
            print("[!] No video file was found which corresponds to subtitle file '" + srt + "'.")
            print("[!] The following video formats are currently supported:")
            extList = ""
            for ext in usable_extensions:
                extList += ext + ", "
            print(extList)
    return composition
def get_section_header(self, section):
    """Get a specific section header by index or name.

    Args:
        section (int or str): The index or name of the section header to
            return.

    Returns:
        :class:`~ELF.SectionHeader`: The section header.

    Raises:
        KeyError: The requested section header does not exist.
    """
    self._ensure_section_headers_loaded()
    # `type(...) is int` deliberately excludes bool (unlike isinstance).
    lookup = (self._section_headers_by_index
              if type(section) is int
              else self._section_headers_by_name)
    return lookup[section]
def _form_output ( span_doc : span , output_format : str , relations : Dict , patterns : List ) -> str :
"""Form an output value according to user input of output _ format
Args :
span _ doc : span
format : str
relations : Dict
patterns : List
Returns : str""" | format_value = [ ]
output_inf = [ a_pattern . in_output for a_pattern in patterns ]
for i in range ( len ( output_inf ) ) :
token_range = relations [ i ]
if token_range and output_inf [ i ] :
format_value . append ( span_doc [ token_range [ 0 ] : token_range [ 1 ] ] . text )
if not output_format :
return " " . join ( format_value )
result_str = re . sub ( "{}" , " " . join ( format_value ) , output_format )
positions = re . findall ( "{[0-9]+}" , result_str )
if not positions :
return result_str
position_indices = [ int ( x [ 1 : - 1 ] ) for x in positions ]
if max ( position_indices ) < len ( format_value ) :
result_str = result_str . format ( * format_value )
else :
try :
result_str = result_str . format ( "" , * format_value )
except :
positions = [ x for x in positions if int ( x [ 1 : - 1 ] ) > len ( format_value ) - 1 or int ( x [ 1 : - 1 ] ) < 0 ]
for pos in positions :
result_str = result_str . replace ( pos , "" )
result_str = result_str . format ( * format_value )
return result_str |
def imf(m):
    '''Returns N(M) dM for given mass according to the Kroupa IMF
    (piecewise power law with breaks at 0.08 and 0.50 solar masses);
    vectorization available via vimf().
    '''
    # Break masses and power-law slopes of the three segments.
    m_break_low = 0.08
    m_break_high = 0.50
    slope_low = 0.30
    slope_mid = 1.30
    slope_high = 2.3
    # Offsets that make the piecewise function continuous at the breaks.
    offset_low = m_break_low ** -slope_low - m_break_low ** -slope_mid
    offset_mid = m_break_high ** -slope_mid - m_break_high ** -slope_high
    if m < m_break_low:
        alpha, const = 0.3, -offset_low - offset_mid
    elif m < m_break_high:
        alpha, const = 1.3, -offset_mid
    else:
        alpha, const = 2.3, 0.0
    return m ** -alpha + const
def open(self):
    '''Open tunnel.

    Starts the tunnel subprocess and records its public URLs on success;
    returns an error string when the tunnel ad server is unreachable.
    '''
    # Fix: Python-2 print statements converted to print() calls.
    if self.is_compatible():
        tunnel_ad_url = self.get_tunnel_ad_url()
        if not tunnel_ad_url:
            return ("Tunnel server appears to be down.")
        # NOTE(review): shell=True with interpolated values — acceptable for
        # trusted local configuration, unsafe for untrusted input.
        cmd = '%s -subdomain=%s -config=%s -log=stdout %s 2>&1 > server.log' % (self.tunnel_server, tunnel_ad_url, self.tunnel_config, self.local_port)
        self.tunnel = subprocess.Popen(cmd, shell=True)
        self.url = '%s.%s' % (tunnel_ad_url, self.tunnel_host)
        self.full_url = 'http://%s.%s:%s' % (tunnel_ad_url, self.tunnel_host, self.tunnel_port)
        self.is_open = True
        print("Tunnel URL: %s" % self.full_url)
        print("Hint: In OSX, you can open a terminal link using cmd + click")
def visit_BoolOp(self, node: AST, dfltChaining: bool = True) -> str:
    """Return `node`s operator and operands as inlined expression."""
    op = node.op
    with self.op_man(op):
        # First operand keeps default chaining; the rest disable it.
        rendered = [self.visit(node.values[0])]
        rendered.extend(self.visit(val, dfltChaining=False) for val in node.values[1:])
        src = self.visit(op).join(rendered)
        return self.wrap_expr(src, dfltChaining)
def _set_fcoe ( self , v , load = False ) :
"""Setter method for fcoe , mapped from YANG variable / interface / fcoe ( list )
If this variable is read - only ( config : false ) in the
source YANG file , then _ set _ fcoe is considered as a private
method . Backends looking to populate this variable should
do so via calling thisObj . _ set _ fcoe ( ) directly .
YANG Description : The list of FCoE interfaces . Each row contains FCoE
interface name and its status .""" | if hasattr ( v , "_utype" ) :
# Some callers hand in a wrapped value; unwrap it into its underlying
# type before trying to coerce it into the generated YANG list class.
v = v . _utype ( v )
try :
# Coerce the incoming value into the auto-generated YANGDynClass list
# type; all of the keyword metadata below (rest names, CLI extensions,
# callpoints, namespaces) is machine-generated from the YANG model and
# should not be edited by hand.
t = YANGDynClass ( v , base = YANGListType ( "fcoe_interface_name" , fcoe . fcoe , yang_name = "fcoe" , rest_name = "Fcoe" , parent = self , is_container = 'list' , user_ordered = False , path_helper = self . _path_helper , yang_keys = 'fcoe-interface-name' , extensions = { u'tailf-common' : { u'info' : u'The list of FCoE Logical interfaces' , u'cli-no-key-completion' : None , u'alt-name' : u'Fcoe' , u'sort-priority' : u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL' , u'cli-custom-range-actionpoint' : u'FcoeRangeCliActionpoint' , u'cli-custom-range-enumerator' : u'FcoeRangeCliActionpoint' , u'display-when' : u'/vcsmode/vcs-mode = "true"' , u'cli-full-command' : None , u'callpoint' : u'fcoe_interface_cp' } } ) , is_container = 'list' , yang_name = "fcoe" , rest_name = "Fcoe" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'The list of FCoE Logical interfaces' , u'cli-no-key-completion' : None , u'alt-name' : u'Fcoe' , u'sort-priority' : u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL' , u'cli-custom-range-actionpoint' : u'FcoeRangeCliActionpoint' , u'cli-custom-range-enumerator' : u'FcoeRangeCliActionpoint' , u'display-when' : u'/vcsmode/vcs-mode = "true"' , u'cli-full-command' : None , u'callpoint' : u'fcoe_interface_cp' } } , namespace = 'urn:brocade.com:mgmt:brocade-fcoe' , defining_module = 'brocade-fcoe' , yang_type = 'list' , is_config = True )
except ( TypeError , ValueError ) :
# Coercion failed: re-raise as ValueError carrying the generated-type
# description so callers can see what value shape was expected.
raise ValueError ( { 'error-string' : """fcoe must be of a type compatible with list""" , 'defined-type' : "list" , 'generated-type' : """YANGDynClass(base=YANGListType("fcoe_interface_name",fcoe.fcoe, yang_name="fcoe", rest_name="Fcoe", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='fcoe-interface-name', extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}), is_container='list', yang_name="fcoe", rest_name="Fcoe", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'The list of FCoE Logical interfaces', u'cli-no-key-completion': None, u'alt-name': u'Fcoe', u'sort-priority': u'RUNNCFG_LEVEL_INTERFACE_TYPE_LOGICAL', u'cli-custom-range-actionpoint': u'FcoeRangeCliActionpoint', u'cli-custom-range-enumerator': u'FcoeRangeCliActionpoint', u'display-when': u'/vcsmode/vcs-mode = "true"', u'cli-full-command': None, u'callpoint': u'fcoe_interface_cp'}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='list', is_config=True)""" , } )
# Store the validated list on the name-mangled attribute, then notify the
# object (if it supports _set) that its configuration changed.
self . __fcoe = t
if hasattr ( self , '_set' ) :
self . _set ( )
def compute_signature(self, uri, params, utf=PY3):
    """Compute the signature for a given request.

    :param uri: full URI that Twilio requested on your server
    :param params: post vars that Twilio sent with the request
    :param utf: whether return should be bytestring or unicode (python3)
    :returns: The computed signature
    """
    # Twilio's scheme: concatenate the URI with every key/value pair,
    # sorted by key, then HMAC-SHA1 the result with the auth token.
    payload = uri
    for key, value in sorted(params.items()):
        payload += key + value
    digest = hmac.new(self.token, payload.encode("utf-8"), sha1).digest()
    signature = base64.b64encode(digest)
    if utf:
        signature = signature.decode('utf-8')
    return signature.strip()
def validate(cls, obj, raiseException=False, complainUnrecognized=False):
    """Check if the object satisfies this behavior's requirements.

    @param obj:
        The L{ContentLine<base.ContentLine>} or
        L{Component<base.Component>} to be validated.
    @param raiseException:
        If True, raise a L{base.ValidateError} on validation failure.
        Otherwise return a boolean.
    @param complainUnrecognized:
        If True, fail to validate if an unrecognized parameter or child is
        found. Otherwise log the lack of recognition.
    """
    if not cls.allowGroup and obj.group is not None:
        raise base.VObjectError(
            "{0} has a group, but this object doesn't support groups".format(obj))
    if isinstance(obj, base.ContentLine):
        return cls.lineValidate(obj, raiseException, complainUnrecognized)
    if not isinstance(obj, base.Component):
        raise base.VObjectError(
            "{0} is not a Component or Contentline".format(obj))
    # Validate each child, tallying occurrences by upper-cased name as we go.
    tally = {}
    for child in obj.getChildren():
        if not child.validate(raiseException, complainUnrecognized):
            return False
        childName = child.name.upper()
        tally[childName] = tally.get(childName, 0) + 1
    # Enforce the (min, max) occurrence bounds declared in knownChildren.
    for knownName, bounds in cls.knownChildren.items():
        occurrences = tally.get(knownName, 0)
        if occurrences < bounds[0]:
            if raiseException:
                m = "{0} components must contain at least {1} {2}"
                raise base.ValidateError(m.format(cls.name, bounds[0], knownName))
            return False
        if bounds[1] and occurrences > bounds[1]:
            if raiseException:
                m = "{0} components cannot contain more than {1} {2}"
                raise base.ValidateError(m.format(cls.name, bounds[1], knownName))
            return False
    return True
def organize ( dirs , config , run_info_yaml , sample_names = None , is_cwl = False , integrations = None ) :
"""Organize run information from a passed YAML file or the Galaxy API .
Creates the high level structure used for subsequent processing .
sample _ names is a list of samples to include from the overall file , for cases
where we are running multiple pipelines from the same configuration file .""" | from bcbio . pipeline import qcsummary
# Avoid the mutable-default-argument pitfall: normalize to an empty dict.
if integrations is None :
integrations = { }
logger . info ( "Using input YAML configuration: %s" % run_info_yaml )
# The input sample YAML is mandatory; fail loudly if it is missing.
assert run_info_yaml and os . path . exists ( run_info_yaml ) , "Did not find input sample YAML file: %s" % run_info_yaml
run_details = _run_info_from_yaml ( dirs , run_info_yaml , config , sample_names , is_cwl = is_cwl , integrations = integrations )
# Let each integration that appears in the config augment run_details with
# remote inputs; the last matching retriever wins as remote_retriever.
remote_retriever = None
for iname , retriever in integrations . items ( ) :
if iname in config :
run_details = retriever . add_remotes ( run_details , config [ iname ] )
remote_retriever = retriever
out = [ ]
for item in run_details :
item [ "dirs" ] = dirs
# Normalize "name" into a [lane, description] pair; a plain string name
# is expanded using a cleaned version of the description.
if "name" not in item :
item [ "name" ] = [ "" , item [ "description" ] ]
elif isinstance ( item [ "name" ] , six . string_types ) :
description = "%s-%s" % ( item [ "name" ] , clean_name ( item [ "description" ] ) )
item [ "name" ] = [ item [ "name" ] , description ]
item [ "description" ] = description
# add algorithm details to configuration , avoid double specification
item [ "resources" ] = _add_remote_resources ( item [ "resources" ] )
item [ "config" ] = config_utils . update_w_custom ( config , item )
# algorithm is folded into item["config"] above, so drop the duplicate.
item . pop ( "algorithm" , None )
item = add_reference_resources ( item , remote_retriever )
item [ "config" ] [ "algorithm" ] [ "qc" ] = qcsummary . get_qc_tools ( item )
item [ "config" ] [ "algorithm" ] [ "vcfanno" ] = vcfanno . find_annotations ( item , remote_retriever )
# Create temporary directories and make absolute , expanding environmental variables
tmp_dir = tz . get_in ( [ "config" , "resources" , "tmp" , "dir" ] , item )
if tmp_dir : # if no environmental variables , make and normalize the directory
# otherwise we normalize later in distributed . transaction :
if os . path . expandvars ( tmp_dir ) == tmp_dir :
tmp_dir = utils . safe_makedir ( os . path . expandvars ( tmp_dir ) )
# NOTE(review): do_download is disabled when any integrations are
# configured -- presumably remote runs should not fetch locally; confirm.
tmp_dir = genome . abs_file_paths ( tmp_dir , do_download = not integrations )
item [ "config" ] [ "resources" ] [ "tmp" ] [ "dir" ] = tmp_dir
out . append ( item )
# Attach provenance information (skipped for CWL runs) before returning.
out = _add_provenance ( out , dirs , config , not is_cwl )
return out
def initialize(self):
    """Initialize the object: fetch network/location/device state, then
    set up the socket, worker and zones."""
    self.network_status = self.get_network_status()
    self.name = self.network_status.get('network_name', 'Unknown')
    self.location_info = self.get_location_info()
    self.device_info = self.get_device_info()
    # Device id is only available once device_info has been fetched.
    if self.device_info:
        self.device_id = self.device_info.get('device_id')
    else:
        self.device_id = "Unknown"
    self.initialize_socket()
    self.initialize_worker()
    self.initialize_zones()
def summarize_tensors(tensor_dict, tag=None):
    """Summarize the tensors.

    Args:
      tensor_dict: a dictionary of tensors.
      tag: name scope of the summary; defaults to tensors/.
    """
    if tag is None:
        tag = "tensors/"
    # Emit one histogram summary per tensor, namespaced under `tag`.
    for name, tensor in tensor_dict.items():
        tf.summary.histogram(tag + name, tensor)
def check_url(aggregate):
    """Helper function waiting for URL queue."""
    finished = False
    while not finished:
        try:
            aggregate.urlqueue.join(timeout=30)
            finished = True
        except urlqueue.Timeout:
            # Cleanup threads every 30 seconds; stop waiting once no
            # checker threads remain alive.
            aggregate.remove_stopped_threads()
            finished = not any(aggregate.get_check_threads())
def supports(cls, template_file=None):
    """:return: Whether the engine can process given template file or not."""
    if not anytemplate.compat.IS_PYTHON_3:
        return super(Engine, cls).supports(template_file=template_file)
    # This engine is not ported to Python 3: demote its priority and always
    # report that it supports nothing there.
    cls._priority = 99
    return False
def addItem(self, title, link, description, date, append=True):
    """Insert an item."""
    item = self.rss.createElement('item')
    # guid deliberately reuses the link value.
    for tag, text in (('title', title), ('link', link),
                      ('description', description), ('guid', link),
                      ('pubDate', date)):
        self.addElement(item, tag, text)
    if append:
        self.channel.appendChild(item)
        return
    # Prepend: slot the new item before the first existing one, or just
    # append when the channel holds no items yet.
    existing = self.rss.getElementsByTagName('item')
    if existing:
        self.channel.insertBefore(item, existing[0])
    else:
        self.channel.appendChild(item)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.