signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def decode_list_oov(self, ids, source_oov_id_to_token):
    """Decode vocabulary ids back to tokens, resolving temporary OOV ids.

    Ids not present in the base vocabulary are treated as temporary OOV
    ids starting at ``self.vocab_size`` and are looked up in
    ``source_oov_id_to_token`` (ordered as the tokens appeared in the
    source).

    Args:
        ids: vocab ids, possibly including source temporary OOV ids.
        source_oov_id_to_token: list of source OOV tokens, in source order.
    Returns:
        Decoded tokens, possibly including source OOV tokens.
    """
    ordered = reversed(ids) if self._reverse else ids
    vocab = self._id_to_token
    return [
        vocab[i] if i in vocab else source_oov_id_to_token[i - self.vocab_size]
        for i in ordered
    ]
def session_callback(self, signal):
    """Handle signalling from the stream session.

    Data - new data available for processing.
    Playing - connection is healthy.
    Retry - if there is no connection to device.

    :param signal: one of the SIGNAL_* constants from the streaming module.
    """
    if signal == SIGNAL_DATA:
        self.event.new_event(self.data)
    elif signal == SIGNAL_FAILED:
        # Schedule a reconnect attempt; the failure is still reported below.
        self.retry()
    # Both healthy and failed states are forwarded to the (optional)
    # status callback so the owner can track connectivity.
    if signal in [SIGNAL_PLAYING, SIGNAL_FAILED] and self.connection_status_callback:
        self.connection_status_callback(signal)
def collect_transitive_dependencies(collected: Set[str], dep_graph: DepGraph, from_name: str) -> None:
    """Accumulate into ``collected`` every name reachable from ``from_name``.

    Performs a depth-first walk of ``dep_graph``; ``collected`` doubles as
    the visited set, so shared and cyclic dependencies are visited once.
    """
    for dependency in dep_graph[from_name]:
        if dependency in collected:
            continue
        collected.add(dependency)
        collect_transitive_dependencies(collected, dep_graph, dependency)
def _validate_row_label ( label , column_type_map ) :
"""Validate a row label column .
Parameters
label : str
Name of the row label column .
column _ type _ map : dict [ str , type ]
Dictionary mapping the name of each column in an SFrame to the type of
the values in the column .""" | if not isinstance ( label , str ) :
raise TypeError ( "The row label column name must be a string." )
if not label in column_type_map . keys ( ) :
raise ToolkitError ( "Row label column not found in the dataset." )
if not column_type_map [ label ] in ( str , int ) :
raise TypeError ( "Row labels must be integers or strings." ) |
def deserialize(cls, target_class, obj):
    """Build a ShareDetail from an API response dict.

    :type target_class: object_.ShareDetail|type
    :type obj: dict
    :rtype: object_.ShareDetail
    """
    # Bypass __init__: attributes are filled in directly from the payload.
    share_detail = target_class.__new__(target_class)
    # NOTE(review): both _ATTRIBUTE_PAYMENT and _ATTRIBUTE_DRAFT_PAYMENT are
    # read from _FIELD_DRAFT_PAYMENT; _ATTRIBUTE_PAYMENT presumably should
    # come from a plain payment field -- confirm against the API contract.
    share_detail.__dict__ = {
        cls._ATTRIBUTE_PAYMENT: converter.deserialize(
            object_.ShareDetailPayment,
            cls._get_field_or_none(cls._FIELD_DRAFT_PAYMENT, obj)),
        cls._ATTRIBUTE_READ_ONLY: converter.deserialize(
            object_.ShareDetailReadOnly,
            cls._get_field_or_none(cls._FIELD_READ_ONLY, obj)),
        cls._ATTRIBUTE_DRAFT_PAYMENT: converter.deserialize(
            object_.ShareDetailDraftPayment,
            cls._get_field_or_none(cls._FIELD_DRAFT_PAYMENT, obj)),
    }
    return share_detail
def zero_crossing_before(self, n):
    """Find the nearest zero crossing in the waveform before time ``n``.

    :param n: position in seconds.
    :return: position of the zero crossing, in seconds.
    """
    n_in_samples = int(n * self.samplerate)
    # Search back at most one second of audio from ``n``.
    search_start = n_in_samples - self.samplerate
    if search_start < 0:
        search_start = 0
    frame = zero_crossing_last(self.range_as_mono(search_start, n_in_samples)) + search_start
    return frame / float(self.samplerate)
def add_errors(self, property_name, errors):
    """Add one or several errors to a property.

    :param property_name: str, property name
    :param errors: list or Error, error object(s)
    :return: shiftschema.result.Result (self, for chaining)
    :raises x.InvalidErrorType: if any item is not an Error instance
    """
    # Normalize a single error into a list.
    if type(errors) is not list:
        errors = [errors]
    # Validate every item before mutating state.
    for error in errors:
        if not isinstance(error, Error):
            err = 'Error must be of type {}'
            raise x.InvalidErrorType(err.format(Error))
    if property_name in self.errors:
        self.errors[property_name].extend(errors)
    else:
        self.errors[property_name] = errors
    return self
def get_position():
    """Return the mouse's location as a tuple of (x, y).

    Uses the macOS Quartz event API; a fresh event is created solely to
    read the current cursor location from it.
    """
    e = Quartz.CGEventCreate(None)
    point = Quartz.CGEventGetLocation(e)
    return (point.x, point.y)
def semantic_distance(go_id1, go_id2, godag, branch_dist=None):
    '''Find the semantic distance (minimum number of connecting branches)
    between two GO terms.

    Thin wrapper around ``min_branch_length``; ``branch_dist`` is forwarded
    unchanged.
    '''
    return min_branch_length(go_id1, go_id2, godag, branch_dist)
def calculate_within_class_scatter_matrix(X, y):
    """Calculate the Within-Class Scatter matrix.

    Parameters:
        X: array-like, shape (m, n) - the samples
        y: array-like, shape (m,) - the class labels
    Returns:
        within_class_scatter_matrix: array-like, shape (n, n)
    """
    class_means = calculate_mean_vectors(X, y)
    n_features = X.shape[1]
    scatter = np.zeros((n_features, n_features))
    # Accumulate each class's scatter about its own mean.
    for label, mean_vec in zip(np.unique(y), class_means):
        class_scatter = np.zeros((n_features, n_features))
        mean_col = mean_vec.reshape(n_features, 1)
        for sample in X[y == label, :]:
            diff = sample.reshape(n_features, 1) - mean_col
            class_scatter += diff @ diff.T
        scatter += class_scatter
    return scatter
def generate_next_population(individuals, toolbox):
    """Perform truncated selection with elitism.

    :param individuals: current population; each individual must expose an
        ``error`` attribute (lower is better).
    :param toolbox: DEAP-style toolbox providing ``clone`` and ``mutate``.
    :return: new population of the same size as the input.
    """
    # Work on clones so callers' individuals are never mutated in place.
    individuals = [toolbox.clone(ind) for ind in individuals]
    individuals.sort(key=lambda x: x.error)
    offspring = []
    pop_size = len(individuals)
    num_top = math.floor(pop_size / 2)
    # NOTE(review): this slice keeps num_top + 1 parents, one more than the
    # advertised top half -- confirm the off-by-one is intended.
    parents = individuals[0:num_top + 1]
    for _ in range(pop_size - 1):
        off = toolbox.clone(random.choice(parents))
        off = toolbox.mutate(off)[0]
        offspring.append(off)
    # Elitism: carry the single best individual over unchanged.
    offspring.append(individuals[0])
    return offspring
def connect_engine(self):
    """Establish a connection to the database.

    Provides simple error handling for fatal errors.

    Returns:
        True, if we could establish a connection, else False.
    """
    try:
        self.connection = self.engine.connect()
        return True
    except sa.exc.OperationalError as opex:
        # OperationalError covers unreachable host / bad credentials etc.
        LOG.fatal("Could not connect to the database. The error was: '%s'", str(opex))
        return False
def _escape_parameters(self, char):
    """Parse parameters in an escape sequence.

    Parameters are a list of numbers in ascii (e.g. '12', '4', '42', etc)
    separated by a semicolon (e.g. "12;4;42").

    See the [vt102 user guide](http://vt100.net/docs/vt102-ug/) for more
    details on the formatting of escape parameters.
    """
    if char == ";":
        # Parameter separator: commit the digits gathered so far.
        self.params.append(int(self.current_param))
        self.current_param = ""
    elif char == "?":
        # DEC private mode introducer: switch parser state.
        self.state = "mode"
    elif not char.isdigit():
        if len(self.current_param) > 0:
            self.params.append(int(self.current_param))
        # If we're in parameter parsing mode, but we see a non-numeric
        # value, it must be the end of the control sequence.
        self._end_escape_sequence(char)
    else:
        self.current_param += char
def modify_ack_deadline(self, seconds):
    """Reset the deadline for acknowledgement.

    New deadline will be the given value of seconds from now.

    The default implementation handles this for you; you should not need
    to manually deal with setting ack deadlines. The exception case is
    if you are implementing your own custom subclass of
    :class:`~.pubsub_v1.subscriber._consumer.Consumer`.

    Args:
        seconds (int): The number of seconds to set the lease deadline
            to. This should be between 0 and 600. Due to network latency,
            values below 10 are advised against.
    """
    # Hand the request to the consumer's queue; the RPC is issued there.
    self._request_queue.put(requests.ModAckRequest(ack_id=self._ack_id, seconds=seconds))
def _setup(self):
    """Generate _field_map, _field_ids and _oid_nums for use in parsing."""
    cls = self.__class__
    cls._field_map = {}
    cls._field_ids = []
    cls._precomputed_specs = []
    for index, field in enumerate(cls._fields):
        # Normalize two-element field tuples by appending empty params.
        if len(field) < 3:
            field = field + ({},)
            cls._fields[index] = field
        cls._field_map[field[0]] = index
        cls._field_ids.append(_build_id_tuple(field[2], field[1]))
    if cls._oid_pair is not None:
        # Map the (oid field, value field) names to their field indexes.
        cls._oid_nums = (cls._field_map[cls._oid_pair[0]], cls._field_map[cls._oid_pair[1]])
    for index, field in enumerate(cls._fields):
        has_callback = cls._spec_callbacks is not None and field[0] in cls._spec_callbacks
        is_mapped_oid = cls._oid_nums is not None and cls._oid_nums[1] == index
        if has_callback or is_mapped_oid:
            # Spec must be determined per-value at parse time: no precompute.
            cls._precomputed_specs.append(None)
        else:
            cls._precomputed_specs.append((field[0], field[1], field[1], field[2], None))
def update_token(self):
    """Request a new token and store it for future use.

    :return: the new token, or None when the rate limit was hit (the call
        then sleeps 150s and gives up for this attempt).
    :raises RuntimeError: when username/password are missing.
    :raises BadCredentials: when authentication is rejected (NOAUTH).
    :raises AppNexusException: on any other API-reported error.
    """
    logger.info('updating token')
    if None in self.credentials.values():
        raise RuntimeError("You must provide an username and a password")
    credentials = dict(auth=self.credentials)
    url = self.test_url if self.test else self.url
    response = requests.post(url + "auth", json=credentials)
    data = response.json()["response"]
    if "error_id" in data and data["error_id"] == "NOAUTH":
        raise BadCredentials()
    if "error_code" in data and data["error_code"] == "RATE_EXCEEDED":
        # Back off and let the caller retry later; no token this time.
        time.sleep(150)
        return
    if "error_code" in data or "error_id" in data:
        raise AppNexusException(response)
    self.token = data["token"]
    self.save_token()
    return self.token
def from_names(cls, *names):
    """Create a new `ChannelList` from a list of names.

    The list of names can include comma-separated sets of names,
    in which case the return will be a flattened list of all parsed
    channel names.
    """
    new = cls()
    for namestr in names:
        # _split_names expands comma-separated groups into single names.
        for name in cls._split_names(namestr):
            new.append(Channel(name))
    return new
def add_text(text, x=0.01, y=0.01, axes="gca", draw=True, **kwargs):
    """Add text to the axes at the specified position.

    **kwargs go to the axes.text() function. Coordinates are axes
    fractions (transAxes); ``axes="gca"`` uses the current axes.
    """
    if axes == "gca":
        axes = _pylab.gca()
    axes.text(x, y, text, transform=axes.transAxes, **kwargs)
    if draw:
        _pylab.draw()
from collections import defaultdict
def count_unique_keys(input_list):
    """Count the unique keys for each value present in a tuple list.

    >>> count_unique_keys([(3, 4), (1, 2), (2, 4), (8, 2), (7, 2), (8, 1), (9, 1), (8, 4), (10, 4)])
    '{4: 4, 2: 3, 1: 2}'
    >>> count_unique_keys([(4, 5), (2, 3), (3, 5), (9, 3), (8, 3), (9, 2), (10, 2), (9, 5), (11, 5)])
    '{5: 4, 3: 3, 2: 2}'
    """
    # Group distinct keys per value; insertion order of values is preserved.
    keys_by_value = defaultdict(set)
    for key, value in input_list:
        keys_by_value[value].add(key)
    counts = {value: len(keys) for value, keys in keys_by_value.items()}
    return str(counts)
def download_preview(self, image, url_field='url'):
    """Download the binary data of an image attachment at preview size.

    :param image: the image attachment record to download.
    :param str url_field: the field of the image with the right URL
    :return: binary image data
    :rtype: bytes
    """
    return self.download(image, url_field=url_field, suffix='preview')
def addNoise(vecs, percent=0.1, n=2048):
    """Add noise to the given sequence of vectors and return the modified sequence.

    A percentage of the on bits are shuffled to other locations.

    :param vecs: iterable of set-like sparse vectors (sets of on-bit indices).
    :param percent: probability that each on bit is moved.
    :param n: number of bit positions replacements are drawn from.
    """
    noisyVecs = []
    for vec in vecs:
        nv = vec.copy()
        for idx in vec:
            if numpy.random.random() <= percent:
                nv.discard(idx)
                # NOTE(review): the replacement bit may collide with an
                # existing on bit, slightly lowering the effective noise.
                nv.add(numpy.random.randint(n))
        noisyVecs.append(nv)
    return noisyVecs
def _get_covered_instructions ( self ) -> int :
"""Gets the total number of covered instructions for all accounts in
the svm .
: return :""" | total_covered_instructions = 0
for _ , cv in self . coverage . items ( ) :
total_covered_instructions += sum ( cv [ 1 ] )
return total_covered_instructions |
def follow_double_underscores(obj, field_name=None, excel_dialect=True, eval_python=False, index_error_value=None):
    '''Like getattr(obj, field_name) only follows model relationships through "__" or "." as link separators.

    >>> from django.contrib.auth.models import Permission
    >>> import math
    >>> p = Permission.objects.all()[0]
    >>> follow_double_underscores(p, 'content_type__name') == p.content_type.name
    True
    >>> follow_double_underscores(p, 'math.sqrt(len(obj.content_type.name))', eval_python=True) == math.sqrt(len(p.content_type.name))
    True
    '''
    if not obj:
        return obj
    # field_name may already be a pre-split path when recursing.
    if isinstance(field_name, list):
        split_fields = field_name
    else:
        split_fields = re_model_instance_dot.split(field_name)
    # NOTE(review): dead branch -- `False and eval_python` can never be
    # true, so whole-expression eval() is disabled here; the per-attribute
    # eval fallback further below still honors eval_python.
    if False and eval_python:
        try:
            return eval(field_name, {'datetime': datetime, 'math': math, 'collections': collections}, {'obj': obj})
        except IndexError:
            return index_error_value
        except:
            pass
    if len(split_fields) <= 1:
        # Base case: resolve a single attribute, trying common Django
        # suffixes (_id for FKs, _set for reverse relations) as fallbacks.
        if hasattr(obj, split_fields[0]):
            value = getattr(obj, split_fields[0])
        elif hasattr(obj, split_fields[0] + '_id'):
            value = getattr(obj, split_fields[0] + '_id')
        elif hasattr(obj, split_fields[0] + '_set'):
            value = getattr(obj, split_fields[0] + '_set')
        elif split_fields[0] in obj.__dict__:
            value = obj.__dict__.get(split_fields[0])
        elif eval_python:
            # SECURITY: eval of a caller-supplied field name; only safe
            # with trusted input.
            value = eval('obj.' + split_fields[0])
        else:
            # NOTE(review): reached only when no lookup matched, so this
            # getattr most likely raises AttributeError rather than
            # recursing usefully -- confirm intended.
            return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value)
        if value and excel_dialect and isinstance(value, datetime.datetime):
            # Excel-friendly timestamp formatting.
            value = value.strftime('%Y-%m-%d %H:%M:%S')
        return value
    # Recursive case: follow the first link, continue with the remainder.
    return follow_double_underscores(getattr(obj, split_fields[0]), field_name=split_fields[1:], eval_python=eval_python, index_error_value=index_error_value)
def reset_priorities(self, dict_name, priority):
    '''Set all priorities in dict_name to priority.

    :type priority: float or int
    :raises ProgrammerError: when called without holding the session lock.
    :raises EnvironmentError: when the lock was lost mid-operation.
    '''
    if self._session_lock_identifier is None:
        raise ProgrammerError('must acquire lock first')
    ## see comment above for script in update
    conn = redis.Redis(connection_pool=self.pool)
    # Lua script runs atomically on the server: verify we still own the
    # lock, then rewrite every member's score in the keys zset.
    script = conn.register_script('''
    if redis.call("get", KEYS[1]) == ARGV[1]
    then
        local keys = redis.call('ZRANGE', KEYS[2], 0, -1)
        for i, next_key in ipairs(keys) do
            redis.call("zadd", KEYS[2], ARGV[2], next_key)
        end
        return 1
    else
        -- ERROR: No longer own the lock
        return 0
    end
    ''')
    dict_name = self._namespace(dict_name)
    res = script(keys=[self._lock_name, dict_name + 'keys'], args=[self._session_lock_identifier, priority])
    if not res:
        # We either lost the lock or something else went wrong
        raise EnvironmentError('Unable to add items to %s in registry' % dict_name)
def fix_config(self, options):
    """Fix the options, if necessary, i.e. add all required elements to the
    dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    # (option name, factory for the default value, help text); the original
    # repeated this if/if pattern per option -- a table keeps defaults and
    # help strings in one place. Factories avoid constructing the Loader
    # (a stateful object) unless it is actually needed.
    defaults = (
        ("incremental", lambda: False,
         "Whether to load the dataset incrementally (bool)."),
        ("use_custom_loader", lambda: False,
         "Whether to use a custom loader."),
        ("custom_loader",
         lambda: converters.Loader(classname="weka.core.converters.ArffLoader"),
         "The custom loader to use (Loader)."),
    )
    for opt, make_default, help_text in defaults:
        if opt not in options:
            options[opt] = make_default()
        if opt not in self.help:
            self.help[opt] = help_text
    return super(LoadDataset, self).fix_config(options)
def worker_status(worker, profile='default'):
    '''Return the state of the worker.

    CLI Examples:

    .. code-block:: bash

        salt '*' modjk.worker_status node1
        salt '*' modjk.worker_status node1 other-profile
    '''
    config = get_running(profile)
    try:
        return {
            'activation': config['worker.{0}.activation'.format(worker)],
            'state': config['worker.{0}.state'.format(worker)],
        }
    except KeyError:
        # Unknown worker name for this profile.
        return False
def str_to_date(date: str) -> datetime.datetime:
    """Convert a cbr.ru API date string ('DD.MM.YYYY') to a datetime.

    :param date: date from API response, e.g. '25.12.2020'
    :return: date as datetime
    :rtype: datetime.datetime
    :raises ValueError: if the string is not a valid DD.MM.YYYY date
    """
    # strptime replaces the manual split/reverse/int dance and also
    # validates the input (the old code accepted e.g. '1.2.3.4' oddly).
    return datetime.datetime.strptime(date, '%d.%m.%Y')
def get_factory_kwargs(self):
    """Return the keyword arguments for calling the formset factory.

    Optional keys ('formset', 'form') are only included when the
    corresponding attribute is set.
    """
    factory_kwargs = {
        'can_delete': self.can_delete,
        'extra': self.extra,
        'exclude': self.exclude,
        'fields': self.fields,
        'formfield_callback': self.formfield_callback,
        'fk_name': self.fk_name,
    }
    if self.formset_class:
        factory_kwargs['formset'] = self.formset_class
    if self.child_form:
        factory_kwargs['form'] = self.child_form
    return factory_kwargs
def change_bgcolor_enable(self, state):
    """Toggle background colouring on the data model.

    This is implemented so column min/max is only active when bgcolor is.
    ``state`` is a checkbox state (0 when unchecked).
    """
    self.dataModel.bgcolor(state)
    # Global bgcolor control is meaningless for a Series view.
    self.bgcolor_global.setEnabled(not self.is_series and state > 0)
def getWindow(title, exact=False):
    """Return Window object if 'title' or its part found in visible windows titles, else return None.

    Returns only the first window found.

    Args:
        title: unicode string
        exact (bool): True if search only exact match
    """
    titles = getWindows()
    hwnd = titles.get(title, None)
    if not hwnd and not exact:
        # Fall back to a substring search over all visible titles.
        for k, v in titles.items():
            if title in k:
                hwnd = v
                break
    if hwnd:
        return Window(hwnd)
    else:
        return None
def WriteEventBody(self, event):
    """Writes the body of an event to the output.

    Args:
        event (EventObject): event.
    """
    output_string = NativePythonFormatterHelper.GetFormattedEventObject(event)
    self._output_writer.Write(output_string)
def pauli_pow(pauli: Pauli, exponent: int) -> Pauli:
    """Raise an element of the Pauli algebra to a non-negative integer power.

    Uses exponentiation by squaring:
    https://en.wikipedia.org/wiki/Exponentiation_by_squaring
    """
    if not isinstance(exponent, int) or exponent < 0:
        raise ValueError("The exponent must be a non-negative integer.")
    if exponent == 0:
        return Pauli.identity()
    if exponent == 1:
        return pauli
    # Accumulate set bits of the exponent into ``acc`` while repeatedly
    # squaring ``base``.
    acc = Pauli.identity()
    base = pauli
    remaining = exponent
    while remaining > 1:
        if remaining & 1:
            acc = base * acc
        base = base * base
        remaining >>= 1
    return base * acc
def K_swing_check_valve_Crane(D=None, fd=None, angled=True):
    r'''Returns the loss coefficient for a swing check valve as shown in [1]_.

    .. math::
        K_2 = N \cdot f_d

    For angled swing check valves N = 100; for straight valves, N = 50.

    Parameters
    ----------
    D : float, optional
        Diameter of the pipe attached to the valve, [m]
    fd : float, optional
        Darcy friction factor calculated for the actual pipe flow in clean
        steel (roughness = 0.0018 inch) in the fully developed turbulent
        region; do not specify this to use the original Crane friction
        factor!
    angled : bool, optional
        If True, returns a value 2x the unangled value; the style of the
        valve

    Returns
    -------
    K : float
        Loss coefficient with respect to the pipe inside diameter [-]

    Notes
    -----
    This method is not valid in the laminar regime and the pressure drop
    will be underestimated in those conditions.

    Examples
    --------
    >>> K_swing_check_valve_Crane(D=.02)
    2.3974274785373257

    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe.
       Crane, 2009.
    '''
    if D is None and fd is None:
        raise ValueError('Either `D` or `fd` must be specified')
    if fd is None:
        fd = ft_Crane(D)
    multiplier = 100. if angled else 50.
    return multiplier * fd
def parse_range_header(self, header, resource_size):
    """Parse a range header into a list of two-tuples (start, stop) where
    `start` is the starting byte of the range (inclusive) and
    `stop` is the ending byte position of the range (exclusive).

    Args:
        header (str): The HTTP_RANGE request header.
        resource_size (int): The size of the file in bytes.
    Returns:
        None if the value of the header is not syntatically valid.
    """
    if not header or '=' not in header:
        return None
    units, _, spec = header.partition('=')
    if units.strip().lower() != 'bytes':
        return None
    ranges = []
    for part in spec.split(','):
        part = part.strip()
        if '-' not in part:
            return None
        if part.startswith('-'):
            # suffix-byte-range-spec: this form specifies the last N bytes
            # of an entity-body.
            start = max(resource_size + int(part), 0)
            stop = resource_size
        else:
            # byte-range-spec: first-byte-pos "-" [last-byte-pos].
            first, last = part.split('-', 1)
            start = int(first)
            # +1 because last-byte-pos is inclusive in the HTTP spec while
            # our stopping point is exclusive.
            stop = int(last) + 1 if last else resource_size
        if start >= stop:
            return None
        ranges.append((start, stop))
    return ranges
def error_catcher(self, extra_info: Optional[str] = None):
    """Context manager to catch, print and record InstaloaderExceptions.

    :param extra_info: String to prefix error message with.
    """
    try:
        yield
    except InstaloaderException as err:
        if extra_info:
            self.error('{}: {}'.format(extra_info, err))
        else:
            self.error('{}'.format(err))
        # Re-raise after recording when the caller asked for hard failures.
        if self.raise_all_errors:
            raise
def _parse_json_with_fieldnames ( self ) :
"""Parse the raw JSON with all attributes / methods defined in the class , except for the
ones defined starting with ' _ ' or flagged in cls . _ TO _ EXCLUDE .
The final result is stored in self . json""" | for key in dir ( self ) :
if not key . startswith ( '_' ) and key not in self . _TO_EXCLUDE :
self . fieldnames . append ( key )
value = getattr ( self , key )
if value :
self . json [ key ] = value
# Add OK attribute even if value is " False "
self . json [ 'ok' ] = self . ok |
def getNextService(self, discover):
    """Return the next authentication service for the pair of
    user_input and session. This function handles fallback.

    @param discover: a callable that takes a URL and returns a
        list of services
    @type discover: str -> [service]

    @return: the next available service
    """
    manager = self.getManager()
    if manager is not None and not manager:
        # Manager exists but is exhausted: discard it so we re-discover.
        self.destroyManager()
    if not manager:
        yadis_url, services = discover(self.url)
        manager = self.createManager(services, yadis_url)
    if manager:
        service = manager.next()
        # Persist the manager's position in the session.
        manager.store(self.session)
    else:
        service = None
    return service
def connection(self, collectionname, dbname=None):
    """Get a cursor to a collection by name.

    Raises `DataError` on names with unallowable characters.

    :Parameters:
        - `collectionname`: the name of the collection
        - `dbname`: (optional) override the default db for a connection
    """
    if not collectionname or ".." in collectionname:
        raise DataError("collection names cannot be empty")
    if "$" in collectionname and not (collectionname.startswith("oplog.$main") or collectionname.startswith("$cmd")):
        raise DataError("collection names must not "
                        "contain '$': %r" % collectionname)
    if collectionname.startswith(".") or collectionname.endswith("."):
        # Fixed typo in the user-facing message ("collecion" -> "collection").
        raise DataError("collection names must not start "
                        "or end with '.': %r" % collectionname)
    if "\x00" in collectionname:
        raise DataError("collection names must not contain the "
                        "null character")
    return Cursor(dbname or self._pool._dbname, collectionname, self._pool)
def saveas(self, filename, lineendings='default', encoding='latin-1'):
    """Save the IDF as a text file with the filename passed.

    Parameters
    ----------
    filename : str
        Filepath to set the idfname attribute to and save the file as.
    lineendings : str, optional
        Line endings to use in the saved file. Options are 'default',
        'windows' and 'unix'; the default is 'default' which uses the line
        endings for the current system.
    encoding : str, optional
        Encoding to use for the saved file. The default is 'latin-1' which
        is compatible with the EnergyPlus IDFEditor.
    """
    # Unlike save(), this also rebinds idfname to the new path.
    self.idfname = filename
    self.save(filename, lineendings, encoding)
def historical(self, date, base='USD'):
    """Fetch historical exchange rate data from the service.

    :param date: date (supporting strftime) for which rates are requested.
    :param base: base currency code, default 'USD'.
    :return: parsed JSON dict (disclaimer, license, timestamp, base,
        rates); numeric values are decoded as decimal.Decimal.
    :raises OpenExchangeRatesClientException: on any HTTP/request failure.
    """
    try:
        resp = self.client.get(self.ENDPOINT_HISTORICAL % date.strftime("%Y-%m-%d"), params={'base': base})
        resp.raise_for_status()
    except requests.exceptions.RequestException as e:
        raise OpenExchangeRatesClientException(e)
    # Decimal parsing avoids float rounding on currency rates.
    return resp.json(parse_int=decimal.Decimal, parse_float=decimal.Decimal)
def GetHostMemSwappedMB(self):
    '''Return the VMGuestLib host-memory-swapped counter, in MB.

    Wraps VMGuestLib_GetHostMemSwappedMB; raises VMGuestLibException on
    any non-success return code.
    '''
    counter = c_uint()
    ret = vmGuestLib.VMGuestLib_GetHostMemSwappedMB(self.handle.value, byref(counter))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return counter.value
def to_bqm(self, model):
    """Given a pysmt model, return a bqm.

    Adds the values of the biases as determined by the SMT solver to a bqm.

    Args:
        model: A pysmt model.
    Returns:
        :obj:`dimod.BinaryQuadraticModel`
    """
    # Each stored bias is an SMT expression; pull its solved value as float.
    linear = ((v, float(model.get_py_value(bias))) for v, bias in self.linear.items())
    quadratic = ((u, v, float(model.get_py_value(bias))) for (u, v), bias in self.quadratic.items())
    offset = float(model.get_py_value(self.offset))
    return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)
def _create_child(self, tag):
    """Create and attach a new child element (with namespaced tag) to the root."""
    return etree.SubElement(self._root, self._get_namespace_tag(tag))
def view(url: str, **kwargs) -> bool:
    """View the page whether rendered properly. (ensure the <base> tag to
    make external links work)

    A <base> tag pointing at ``url`` is injected when missing so relative
    links keep working from the local copy.

    Args:
        url (str): The url of the site.
    """
    kwargs.setdefault('headers', DEFAULT_HEADERS)
    html = requests.get(url, **kwargs).content
    if b'<base' not in html:
        repl = f'<head><base href="{url}">'
        html = html.replace(b'<head>', repl.encode('utf-8'))
    # Temp file is deliberately left on disk: the browser opens it async.
    fd, fname = tempfile.mkstemp('.html')
    os.write(fd, html)
    os.close(fd)
    return webbrowser.open(f'file://{fname}')
def channels(self):
    """Gets channels of current team.

    Returns:
        list of Channel
    Throws:
        RTMServiceError when request failed
    """
    resp = self._rtm_client.get('v1/current_team.channels')
    if resp.is_fail():
        raise RTMServiceError('Failed to get channels of current team', resp)
    return resp.data['result']
def pow2_quantized_convolution(inp, outmaps, kernel, pad=None, stride=None, dilation=None, group=1, w_init=None, b_init=None, base_axis=1, fix_parameters=False, rng=None, with_bias=True, quantize_w=True, with_zero_w=False, sign_w=True, n_w=8, m_w=2, ste_fine_grained_w=True, quantize_b=True, with_zero_b=False, sign_b=True, n_b=8, m_b=2, ste_fine_grained_b=True, ):
    r"""Pow2 Quantized Convolution.

    Convolution where the inner product uses power-of-2 quantized weights:

    .. math::
        y_{n,a,b} = \sum_{m} \sum_{i} \sum_{j} Q(w_{n,m,i,j}) x_{m,a+i,b+j},

    where :math:`Q(w_{n,m,i,j})` is the power-of-2 quantization function.

    .. note::
        1) To share weights between layers, share the standard floating
           weights ("W"), not the quantized weights ("W_q").
        2) The weights and the quantized weights become synced only after
           ``forward`` is called, not after ``backward``.
        3) Quantized values are stored as floating point numbers, since
           this function is only for simulation purposes.

    Args:
        inp (~nnabla.Variable): N-D array.
        outmaps (int): Number of convolution kernels (output channels).
        kernel (tuple of int): Convolution kernel size, e.g. (3, 5).
        pad (tuple of int): Padding sizes for dimensions.
        stride (tuple of int): Stride sizes for dimensions.
        dilation (tuple of int): Dilation sizes for dimensions.
        group (int): Number of groups of channels.
        w_init: Initializer for weight; defaults to uniform Glorot.
        b_init: Initializer for bias; defaults to zeros when with_bias.
        base_axis (int): Dimensions up to base_axis are sample dimensions.
        fix_parameters (bool): When True, weights and biases are not updated.
        rng (numpy.random.RandomState): Random generator for initializers.
        with_bias (bool): Whether to include the bias term.
        quantize_w / quantize_b (bool): Quantize weights / bias if True.
        with_zero_w / with_zero_b (bool): Include zero among quantized levels.
        sign_w / sign_b (bool): Use signed quantization if True.
        n_w / n_b (int): Bit width used for weight / bias.
        m_w / m_b (int): 2^m is the upper and -2^m the lower bound. Default 2.
        ste_fine_grained_w / ste_fine_grained_b (bool): Fine-grained STE.

    Returns:
        :class:`~nnabla.Variable`: N-D array.
    """
    if w_init is None:
        w_init = UniformInitializer(calc_uniform_lim_glorot(inp.shape[base_axis], outmaps, tuple(kernel)), rng=rng)
    if with_bias and b_init is None:
        b_init = ConstantInitializer()
    # Floating Weight
    w = get_parameter_or_create("W", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, True, not fix_parameters)
    # Quantized Weight
    if quantize_w:
        w_q = get_parameter_or_create("W_q", (outmaps, inp.shape[base_axis] // group) + tuple(kernel), w_init, False)
        # Link computation graph: pow2_quantize writes into w_q.data so the
        # stored quantized weights stay in sync after each forward pass.
        real_w_q = F.pow2_quantize(w, quantize=quantize_w, sign=sign_w, with_zero=with_zero_w, n=n_w, m=m_w, ste_fine_grained=ste_fine_grained_w, outputs=[w_q.data])
        real_w_q.persistent = True
    else:
        real_w_q = w
    # Bias
    # Floating
    b = None
    b_q = None
    real_b_q = None
    if with_bias:
        b = get_parameter_or_create("b", (outmaps,), b_init, True, not fix_parameters)
        if quantize_b:
            b_q = get_parameter_or_create("b_q", (outmaps,), b_init, False)
            # Link computation graph (same sync mechanism as the weights).
            real_b_q = F.pow2_quantize(b, quantize=quantize_b, sign=sign_b, with_zero=with_zero_b, n=n_b, m=m_b, ste_fine_grained=ste_fine_grained_b, outputs=[b_q.data])
            real_b_q.persistent = True
        else:
            real_b_q = b
    return F.convolution(inp, real_w_q, real_b_q, base_axis, pad, stride, dilation, group)
def set_torrent_download_limit(self, infohash_list, limit):
    """Set the download speed limit of the supplied torrents.

    :param infohash_list: Single infohash or a list() of infohashes.
    :param limit: Speed limit in bytes.
    :return: Result of the POST to the qBittorrent WebUI endpoint.
    """
    payload = self._process_infohash_list(infohash_list)
    payload['limit'] = limit
    return self._post('command/setTorrentsDlLimit', data=payload)
def _reinit_daq_daemons(sender, instance, **kwargs):
    """Restart the DAQ daemon(s) affected by a model change.

    Depending on the type of the saved instance, look up the
    BackgroundProcess running the relevant device protocol and restart it
    so the new configuration takes effect.

    :param sender: signal sender (unused)
    :param instance: the saved model instance (Device, Variable or Scaling)
    :return: False when a BackgroundProcess lookup fails, None otherwise
    """
    if type(instance) is Device:
        try:
            bp = BackgroundProcess.objects.get(pk=instance.protocol_id)
        except Exception:
            # was a bare `except:`; at minimum do not swallow
            # SystemExit/KeyboardInterrupt
            return False
        bp.restart()
    elif type(instance) is Variable:
        try:
            bp = BackgroundProcess.objects.get(pk=instance.device.protocol_id)
        except Exception:
            return False
        bp.restart()
    elif type(instance) is Scaling:
        # flat=True yields plain ids; without it values_list() returns
        # 1-tuples, which are not valid `pk` lookup values.
        for bp_pk in instance.variable_set.all().values_list('device__protocol_id', flat=True).distinct():
            try:
                bp = BackgroundProcess.objects.get(pk=bp_pk)
            except Exception:
                return False
            bp.restart()
    else:
        logger.debug('post_save from %s' % type(instance))
def get_airport_stats(self, iata, page=1, limit=100):
    """Retrieve the performance statistics at an airport.

    Builds the flightradar24 airport-data URL for the given IATA code and
    delegates the fetch to the underlying FR24 client.

    Args:
        iata (str): The IATA code for an airport, e.g. HYD
        page (int): Optional page number; users on a paid flightradar24
            plan can pass higher page numbers to get more data
        limit (int): Optional limit on number of records returned

    Returns:
        A list of dicts with the data; one dict for each row of data
        from flightradar24

    Example::

        from pyflightdata import FlightData
        f = FlightData()
        # optional login
        f.login(myemail, mypassword)
        f.get_airport_stats('HYD')
        f.get_airport_stats('HYD', page=1, limit=10)
    """
    stats_url = AIRPORT_DATA_BASE.format(iata, str(self.AUTH_TOKEN), page, limit)
    return self._fr24.get_airport_stats(stats_url)
def build_extension(self, ext):
    """Compile the py_mini_racer extension manually, bypassing setuptools.

    If a prebuilt V8 library path (V8_PATH) is configured, that binary is
    copied straight into the build tree; otherwise V8 is built first (if
    needed) and the extension is compiled the regular setuptools way.
    Any failure is re-raised with a hint about the prebuilt wheels.
    """
    try:
        # Build V8 from source first unless it is already available.
        if not is_v8_built():
            self.run_command('build_v8')
        self.debug = True
        if V8_PATH:
            # A prebuilt V8 binary was supplied: just copy it into place.
            dest_filename = join(self.build_lib, "py_mini_racer")
            copy_file(V8_PATH, dest_filename, verbose=self.verbose, dry_run=self.dry_run)
        else:
            # Fall back to the standard setuptools compilation.
            build_ext.build_extension(self, ext)
    except Exception as e:
        traceback.print_exc()
        # Re-raise with a friendlier message pointing at the binary wheels.
        err_msg = """py_mini_racer failed to build, ensure you have an up-to-date pip (>= 8.1) to use the wheel instead
To update pip: 'pip install -U pip'
See also: https://github.com/sqreen/PyMiniRacer#binary-builds-availability
Original error: %s"""
        raise Exception(err_msg % repr(e))
def _wrap ( self , meth , * args , ** kwargs ) :
"""Calls a given method with the appropriate arguments , or defers such
a call until the instance has been connected""" | if not self . connected :
return self . _connectSchedule ( self . _wrap , meth , * args , ** kwargs )
opres = meth ( self , * args , ** kwargs )
return self . defer ( opres ) |
def infer_ml_task(y):
    """Infer the machine learning task to select for.

    The result will be either ``'regression'`` or ``'classification'``.
    If the target vector only consists of integer typed values or objects,
    we assume the task is ``'classification'``, else ``'regression'``.

    :param y: The target vector y.
    :type y: pandas.Series
    :return: 'classification' or 'regression'
    :rtype: str
    """
    # `np.object` was removed in NumPy 1.24; the builtin `object` is the
    # supported equivalent for the dtype comparison.
    if y.dtype.kind in np.typecodes['AllInteger'] or y.dtype == object:
        ml_task = 'classification'
    else:
        ml_task = 'regression'
    _logger.warning('Infered {} as machine learning task'.format(ml_task))
    return ml_task
def to_str(self, separator=''):
    '''Concatenate the stringified elements of the query into one string.

    Each element of the query result is coerced to a string and the
    resulting strings are joined, optionally interleaved with *separator*
    (itself coerced to a string). This allows the natural processing of
    character sequences as strings.

    Note: this method uses immediate execution.

    Args:
        separator: An optional item which will be coerced to a string and
            inserted between each source item in the resulting string.

    Returns:
        A single string built by stringifying and concatenating every
        element.

    Raises:
        TypeError: If any element or the separator cannot be coerced to a
            string.
        ValueError: If the Queryable is closed.
    '''
    if self.closed():
        raise ValueError("Attempt to call to_str() on a closed Queryable.")
    sep = str(separator)
    return sep.join(self.select(str))
async def edit(self, **fields):
    """|coro|

    Edits the group.

    Parameters
    -----------
    name: Optional[:class:`str`]
        The new name to change the group to.
        Could be ``None`` to remove the name.
    icon: Optional[:class:`bytes`]
        A :term:`py:bytes-like object` representing the new icon.
        Could be ``None`` to remove the icon.

    Raises
    -------
    HTTPException
        Editing the group failed.
    """
    if 'icon' in fields:
        icon_bytes = fields['icon']
        # A None icon is passed through unchanged to remove the icon.
        if icon_bytes is not None:
            fields['icon'] = utils._bytes_to_base64_data(icon_bytes)
    data = await self._state.http.edit_group(self.id, **fields)
    self._update_group(data)
def define_flags(self, parser):
    """Adds DebuggerPlugin CLI flags to parser.

    Registers two mutually exclusive gRPC port flags controlling whether
    the non-interactive or the interactive debugger data server is
    started (the default of -1 means "disabled").
    """
    group = parser.add_argument_group('debugger plugin')
    # Non-interactive mode: feeds "health pills" to the Graph Dashboard.
    group.add_argument('--debugger_data_server_grpc_port', metavar='PORT', type=int, default=-1, help='''\
The port at which the non-interactive debugger data server should
receive debugging data via gRPC from one or more debugger-enabled
TensorFlow runtimes. No debugger plugin or debugger data server will be
started if this flag is not provided. This flag differs from the
`--debugger_port` flag in that it starts a non-interactive mode. It is
for use with the "health pills" feature of the Graph Dashboard. This
flag is mutually exclusive with `--debugger_port`.\
''')
    # Interactive mode: used by the Debugger Dashboard to pause execution.
    group.add_argument('--debugger_port', metavar='PORT', type=int, default=-1, help='''\
The port at which the interactive debugger data server (to be started by
the debugger plugin) should receive debugging data via gRPC from one or
more debugger-enabled TensorFlow runtimes. No debugger plugin or
debugger data server will be started if this flag is not provided. This
flag differs from the `--debugger_data_server_grpc_port` flag in that it
starts an interactive mode that allows user to pause at selected nodes
inside a TensorFlow Graph or between Session.runs. It is for use with
the interactive Debugger Dashboard. This flag is mutually exclusive with
`--debugger_data_server_grpc_port`.\
''')
def fetch_by_ids(TableName, iso_id_list, numin, numax, ParameterGroups=[], Parameters=[]):
    """Download line-by-line data from the HITRANonline server and save it
    to a local table.

    INPUT PARAMETERS:
        TableName:   local table name to fetch in (required)
        iso_id_list: list of "global" isotopologue ids (required); a single
                     id is also accepted (see help on ISO_ID)
        numin:       lower wavenumber bound (required)
        numax:       upper wavenumber bound (required)

    This function is required if the user wants to download multiple
    species into a single table.

    EXAMPLE OF USAGE:
        fetch_by_ids('water', [1,2,3,4], 4000, 4100)
    """
    # Accept a single id as well as a list/tuple of ids.
    # (isinstance replaces the old `type(...) not in set([list, tuple])`.)
    if not isinstance(iso_id_list, (list, tuple)):
        iso_id_list = [iso_id_list]
    queryHITRAN(TableName, iso_id_list, numin, numax, pargroups=ParameterGroups, params=Parameters)
    # Build a human-readable comment describing the table contents.
    iso_names = [ISO_ID[i][ISO_ID_INDEX['iso_name']] for i in iso_id_list]
    Comment = 'Contains lines for ' + ','.join(iso_names)
    Comment += ('\n in %.3f-%.3f wavenumber range' % (numin, numax))
    comment(TableName, Comment)
def itemsize(self):
    """Individual item sizes (end minus start) for the active entries."""
    active = self._items[:self._count]
    return active[:, 1] - active[:, 0]
def destroy(name, conn=None, call=None):
    '''Delete a single VM.

    :param name: name of the VM to destroy
    :param conn: optional existing cloud connection; created when omitted
    :param call: must not be 'function' (destroy is an action)
    :return: True when the node was destroyed, False otherwise
    '''
    if call == 'function':
        raise SaltCloudSystemExit('The destroy action must be called with -d, --destroy, -a or --action.')
    __utils__['cloud.fire_event']('event', 'destroying instance', 'salt/cloud/{0}/destroying'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    if not conn:
        conn = get_conn()
    # pylint: disable=E0602
    node = get_node(conn, name)
    profiles = get_configured_provider()['profiles']
    # pylint: disable=E0602
    if node is None:
        log.error('Unable to find the VM %s', name)
        # BUGFIX: previously execution fell through and dereferenced the
        # missing node below, raising AttributeError.
        return False
    profile = None
    if 'metadata' in node.extra and 'profile' in node.extra['metadata']:
        profile = node.extra['metadata']['profile']
    flush_mine_on_destroy = False
    if profile and profile in profiles and 'flush_mine_on_destroy' in profiles[profile]:
        flush_mine_on_destroy = profiles[profile]['flush_mine_on_destroy']
    if flush_mine_on_destroy:
        log.info('Clearing Salt Mine: %s', name)
        mopts_ = salt.config.DEFAULT_MINION_OPTS
        conf_path = '/'.join(__opts__['conf_file'].split('/')[:-1])
        mopts_.update(salt.config.minion_config(os.path.join(conf_path, 'minion')))
        client = salt.client.get_local_client(mopts_)
        minions = client.cmd(name, 'mine.flush')
        log.info('Clearing Salt Mine: %s, %s', name, flush_mine_on_destroy)
    log.info('Destroying VM: %s', name)
    ret = conn.destroy_node(node)
    if ret:
        log.info('Destroyed VM: %s', name)
        # Fire destroy action
        __utils__['cloud.fire_event']('event', 'destroyed instance', 'salt/cloud/{0}/destroyed'.format(name), args={'name': name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
        if __opts__['delete_sshkeys'] is True:
            public_ips = getattr(node, __opts__.get('ssh_interface', 'public_ips'))
            if public_ips:
                salt.utils.cloud.remove_sshkey(public_ips[0])
            private_ips = getattr(node, __opts__.get('ssh_interface', 'private_ips'))
            if private_ips:
                salt.utils.cloud.remove_sshkey(private_ips[0])
        if __opts__.get('update_cachedir', False) is True:
            __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
        return True
    log.error('Failed to Destroy VM: %s', name)
    return False
def multi_split(s, split):  # type: (S, Iterable[S]) -> List[S]
    """Splits on multiple given separators, dropping empty fragments."""
    # Normalise every separator to "|", then split once on that marker.
    for sep in split:
        s = s.replace(sep, "|")
    return [part for part in s.split("|") if part]
def pickTextColor(self):
    """Prompts the user to select a text color."""
    # Open a modal color dialog seeded with the current text color.
    clr = QColorDialog.getColor(self.textColor(), self.window(), 'Pick Text Color')
    # An invalid color means the user cancelled the dialog; ignore it.
    if clr.isValid():
        self.setTextColor(clr)
def get_rm_conf(self):
    """Get excluded files config from remove_file.

    Returns None when the file does not exist; otherwise a dict mapping
    each option in the [remove] section to its comma-separated values.
    """
    if not os.path.isfile(self.remove_file):
        return None
    # Parse the INI-style config and flatten the [remove] section.
    parsedconfig = ConfigParser.RawConfigParser()
    parsedconfig.read(self.remove_file)
    rm_conf = {}
    for item, value in parsedconfig.items('remove'):
        # Undo escape sequences stored in the config before splitting.
        if six.PY3:
            decoded = value.strip().encode('utf-8').decode('unicode-escape')
        else:
            decoded = value.strip().decode('string-escape')
        rm_conf[item] = decoded.split(',')
    return rm_conf
def _process_option ( self , tsocket , command , option ) :
"""For all telnet options , re - implement the default telnetlib behaviour
and refuse to handle any options . If the server expresses interest in
' terminal type ' option , then reply back with ' xterm ' terminal type .""" | if command == DO and option == TTYPE :
tsocket . sendall ( IAC + WILL + TTYPE )
tsocket . sendall ( IAC + SB + TTYPE + b"\0" + b"xterm" + IAC + SE )
elif command in ( DO , DONT ) :
tsocket . sendall ( IAC + WONT + option )
elif command in ( WILL , WONT ) :
tsocket . sendall ( IAC + DONT + option ) |
def Scalars(self, run, tag):
    """Retrieve the scalar events associated with a run and tag.

    Args:
        run: A string name of the run for which values are retrieved.
        tag: A string name of the tag for which values are retrieved.

    Raises:
        KeyError: If the run is not found, or the tag is not available
            for the given run.

    Returns:
        An array of `event_accumulator.ScalarEvents`.
    """
    return self.GetAccumulator(run).Scalars(tag)
def _check_data ( self ) :
"""Ensure that the data in the cache is valid .
If it ' s invalid , the cache is wiped .""" | if not self . cache_available ( ) :
return
parsed = self . _parse_data ( )
_LOGGER . debug ( 'Received new data from sensor: Temp=%.1f, Humidity=%.1f' , parsed [ MI_TEMPERATURE ] , parsed [ MI_HUMIDITY ] )
if parsed [ MI_HUMIDITY ] > 100 : # humidity over 100 procent
self . clear_cache ( )
return
if parsed [ MI_TEMPERATURE ] == 0 : # humidity over 100 procent
self . clear_cache ( )
return |
def do_your_job(self):
    """Move the explore agent one step toward the target while avoiding
    blockages on the grid.

    This function is messy and needs to be looked at. It currently has a
    bug in that the backtrack oscillates, so a new method of doing this
    is needed - probably checking if we previously backtracked in that
    direction for those coords, i.e. keep track of cells visited and
    number of times visited?
    """
    # Unit direction (-1/0/+1 per axis) from current position to target.
    y, x = self.get_intended_direction()
    # first find out where we should go
    if self.target_x == self.current_x and self.target_y == self.current_y:
        # Already at the target - record the result once and stop.
        if len(self.results) == 0:
            self.results.append("TARGET ACQUIRED")
            self.lg_mv(2, self.name + ": TARGET ACQUIRED")
        return
    self.num_steps += 1
    # Tiles the agent is allowed to walk on; first try is to move on the
    # x axis in a simple greedy search.
    accessible = ['\\', '-', '|', '/', '.']
    # randomly move in Y direction instead of X if all paths clear
    if y != 0 and x != 0 and self.backtrack == [0, 0]:
        if random.randint(1, 10) > 6:
            if self.grd.get_tile(self.current_y + y, self.current_x) in accessible:
                self.current_y += y
                self.lg_mv(3, self.name + ": randomly moving Y axis " + str(self.num_steps))
                return
    # Greedy move: X axis first, then Y, whenever the next tile is clear.
    # NOTE(review): the West/East log labels look swapped relative to the
    # +x/-x directions - confirm the grid's orientation.
    if x == 1:
        if self.grd.get_tile(self.current_y, self.current_x + 1) in accessible:
            self.current_x += 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving West")
            return
    elif x == -1:
        if self.grd.get_tile(self.current_y, self.current_x - 1) in accessible:
            self.current_x -= 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving East")
            return
    elif y == 1:
        if self.grd.get_tile(self.current_y + 1, self.current_x) in accessible:
            self.current_y += 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving South")
            return
    elif y == -1:
        if self.grd.get_tile(self.current_y - 1, self.current_x) in accessible:
            self.current_y -= 1
            self.lg_mv(3, self.name + ": move# " + str(self.num_steps) + " - moving North")
            return
    # No move was possible: mark the start cell and persist the grid.
    self.grd.set_tile(self.start_y, self.start_x, 'A')
    self.grd.save(os.path.join(os.getcwd(), 'agent.txt'))
def draft(self, **kwargs):
    '''Allows for easily re-drafting a policy.

    After a policy has been created, it was not previously possible to
    re-draft the published policy. This method makes it possible for a
    user with existing, published, policies to create drafts from them so
    that they are modifiable.

    See https://github.com/F5Networks/f5-common-python/pull/1099

    :param kwargs: only the ``legacy`` flag is honoured here; remaining
        keyword arguments are discarded (see NOTE below)
    :return:
    '''
    tmos_ver = self._meta_data['bigip']._meta_data['tmos_version']
    legacy = kwargs.pop('legacy', False)
    # Drafts only exist on TMOS >= 12.1.0 and are unavailable in legacy mode.
    if LooseVersion(tmos_ver) < LooseVersion('12.1.0') or legacy:
        raise DraftPolicyNotSupportedInTMOSVersion("Drafting on this version of BIG-IP is not supported")
    # NOTE(review): this rebinding discards any remaining user-supplied
    # kwargs - confirm that is intentional.
    kwargs = dict(createDraft=True)
    super(Policy, self)._modify(**kwargs)
    # Re-read this policy from the Drafts subPath and point the local
    # object at the freshly-created draft resource.
    get_kwargs = {'name': self.name, 'partition': self.partition, 'uri_as_parts': True, 'subPath': 'Drafts'}
    base_uri = self._meta_data['container']._meta_data['uri']
    session = self._meta_data['bigip']._meta_data['icr_session']
    response = session.get(base_uri, **get_kwargs)
    json_data = response.json()
    self._local_update(json_data)
    self._activate_URI(json_data['selfLink'])
def run():
    """Execute the build process, optionally publishing the results."""
    args = parse()
    # NOTE: `glob_path` is expected to be defined at module level.
    results = [build(path) for path in glob.iglob(glob_path)]
    if args['publish']:
        for entry in results:
            publish(entry)
def init_from_datastore(self):
    """Init list of submissions from Datastore.

    Should be called by each worker during initialization.
    """
    self._attacks = {}
    self._targeted_attacks = {}
    self._defenses = {}
    for entity in self._datastore_client.query_fetch(kind=KIND_SUBMISSION):
        submission_id = entity.key.flat_path[-1]
        # Keep whichever participant identifiers are present on the entity.
        participant_id = {k: entity[k] for k in ('team_id', 'baseline_id') if k in entity}
        descriptor = SubmissionDescriptor(path=entity['submission_path'], participant_id=participant_id)
        # The first two elements of the key path identify the entity group.
        entity_kind = list(entity.key.flat_path[0:2])
        if entity_kind == ATTACKS_ENTITY_KEY:
            self._attacks[submission_id] = descriptor
        elif entity_kind == TARGET_ATTACKS_ENTITY_KEY:
            self._targeted_attacks[submission_id] = descriptor
        elif entity_kind == DEFENSES_ENTITY_KEY:
            self._defenses[submission_id] = descriptor
def validateProxy(path):
    """Test that the proxy certificate is RFC 3820 compliant and that it
    is valid for at least the next 15 minutes.

    Exits the process (status 1) with a message on stderr when the proxy
    cannot be loaded, is a legacy Globus proxy, is expired, or expires in
    less than 15 minutes. Returns True for a validated proxy.

    NOTE: this is Python 2 code (`except Exception, e` / `print >>`).
    """
    # load the proxy from path
    try:
        proxy = M2Crypto.X509.load_cert(path)
    except Exception, e:
        msg = "Unable to load proxy from path %s : %s" % (path, e)
        print >> sys.stderr, msg
        sys.exit(1)
    # make sure the proxy is RFC 3820 compliant
    # or is an end-entity X.509 certificate
    try:
        proxy.get_ext("proxyCertInfo")
    except LookupError:
        # it is not an RFC 3820 proxy so check
        # if it is an old globus legacy proxy
        subject = proxy.get_subject().as_text()
        if re.search(r'.+CN=proxy$', subject):
            # it is so print warning and exit
            RFCproxyUsage()
            sys.exit(1)
    # attempt to make sure the proxy is still good for more than 15 minutes
    try:
        expireASN1 = proxy.get_not_after().__str__()
        expireGMT = time.strptime(expireASN1, "%b %d %H:%M:%S %Y %Z")
        expireUTC = calendar.timegm(expireGMT)
        now = int(time.time())
        secondsLeft = expireUTC - now
    except Exception, e:
        # problem getting or parsing time so just let the client
        # continue and pass the issue along to the server
        secondsLeft = 3600
    if secondsLeft <= 0:
        msg = """\
Your proxy certificate is expired.
Please generate a new proxy certificate and
try again.
"""
        print >> sys.stderr, msg
        sys.exit(1)
    if secondsLeft < (60 * 15):
        msg = """\
Your proxy certificate expires in less than
15 minutes.
Please generate a new proxy certificate and
try again.
"""
        print >> sys.stderr, msg
        sys.exit(1)
    # return True to indicate validated proxy
    return True
def extend_volume(self, volume, size):
    """Extend a volume to a new, larger size.

    :param volume: Name of the volume to be extended.
    :type volume: str
    :type size: int or str
    :param size: Size in bytes, or string representing the size of the
        volume to be created.

    :returns: A dictionary mapping "name" to volume and "size" to the
        volume's new size in bytes.
    :rtype: ResponseDict

    .. note::
        The new size must be larger than the volume's old size.

    .. note::
        The maximum volume size supported is 4 petabytes (4 * 2^50).

    .. note::
        If size is an int, it must be a multiple of 512.

    .. note::
        If size is a string, it must consist of an integer followed by a
        valid suffix.

    Accepted Suffixes

    ====== ======== ======
    Suffix Size     Bytes
    ====== ======== ======
    S      Sector   (2^9)
    K      Kilobyte (2^10)
    M      Megabyte (2^20)
    G      Gigabyte (2^30)
    T      Terabyte (2^40)
    P      Petabyte (2^50)
    ====== ======== ======
    """
    # Delegates to set_volume; truncate=False enforces grow-only semantics.
    return self.set_volume(volume, size=size, truncate=False)
def calc_variances(params):
    '''Calculate the variance of the sum signal and of every
    population-resolved signal, for both the CSD and LFP data types.

    Work is distributed round-robin across MPI ranks (one data type per
    rank). For each data type an HDF5 output file is written containing
    the electrode depths ('depths'), one variance array per cell type,
    and the variance of the summed signal ('sum'). Variances are taken
    over time after discarding the initial transient.
    '''
    depth = params.electrodeParams['z']
    ### CSD / LFP ###
    for i, data_type in enumerate(['CSD', 'LFP']):
        if i % SIZE != RANK:
            continue  # this data type is handled by another MPI rank
        out_path = os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_variances)
        # Context managers ensure the files are closed even on error.
        with h5py.File(out_path, 'w') as f_out:
            f_out['depths'] = depth
            for celltype in params.y:
                in_path = os.path.join(params.populations_path, '%s_population_%s' % (celltype, data_type) + '.h5')
                with h5py.File(in_path, 'r') as f_in:
                    # Dataset.value was removed in h5py 3.0; slice the
                    # dataset directly instead.
                    var = f_in['data'][:, ana_params.transient:].var(axis=1)
                f_out[celltype] = var
            with h5py.File(os.path.join(params.savefolder, data_type + 'sum.h5'), 'r') as f_in:
                var = f_in['data'][:, ana_params.transient:].var(axis=1)
            f_out['sum'] = var
    return
def downsample_with_striding(array, factor):
    """Downsample `array` by striding: keep every factor[d]-th element
    along each dimension d.

    @return: The downsampled array, of the same type as `array`.
    """
    strides = tuple(slice(None, None, step) for step in factor)
    return array[strides]
def calculate_array_feature_extractor_output_shapes(operator):
    '''Allowed input/output patterns are
        1. [N, C] ---> [N, C']

    C' is the number of extracted features.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType, Int64TensorType, StringTensorType])
    batch_size = operator.inputs[0].type.shape[0]
    n_extracted = len(operator.raw_operator.arrayFeatureExtractor.extractIndex)
    # The output's doc_string must survive the type overwrite below.
    original_doc_string = operator.outputs[0].type.doc_string
    output_type = copy.deepcopy(operator.inputs[0].type)
    output_type.shape = [batch_size, n_extracted]
    output_type.doc_string = original_doc_string
    operator.outputs[0].type = output_type
def Reset(self):
    """Reset the camera back to its defaults."""
    self.pan = self.world_center
    # NOTE(review): the target pan is reset to the current position rather
    # than to world_center - confirm this asymmetry is intentional.
    self.desired_pan = self.pos
def patch_data(data, L=100, try_diag=True, verbose=False):
    '''Patch ``data`` (for example Markov chain output) into parts of
    length ``L``. Return a Gaussian mixture where each component gets
    the empirical mean and covariance of one patch.

    :param data:
        Matrix-like array; the points to be patched. Expect ``data[i]``
        as the d-dimensional i-th point.

    :param L:
        Integer; the length of one patch. The last patch will be shorter
        if ``L`` is not a divisor of ``len(data)``.

    :param try_diag:
        Bool; if some patch does not define a proper covariance matrix,
        it cannot define a Gaussian component. ``try_diag`` defines how
        to handle that case: if ``True`` (default), the off-diagonal
        elements are zeroed and a Gaussian is attempted again; if that
        fails too (or ``try_diag`` is ``False``), the patch is skipped.

    :param verbose:
        Bool; if ``True`` print all status information.
    '''
    # Split the data into consecutive patches of length L.
    patches = _np.array([data[start:start + L] for start in range(0, len(data), L)])
    # Empirical mean and covariance per patch.
    means = _np.array([_np.mean(patch, axis=0) for patch in patches])
    covs = _np.array([_np.cov(patch, rowvar=0) for patch in patches])
    components = []
    skipped = []
    for idx, (mu, sigma) in enumerate(zip(means, covs)):
        try:
            components.append(Gauss(mu, sigma))
            continue
        except _np.linalg.LinAlgError as error1:
            if verbose:
                print("Could not form Gauss from patch %i. Reason: %s" % (idx, repr(error1)))
        if not try_diag:
            skipped.append(idx)
            continue
        # Retry with the off-diagonal covariance entries zeroed.
        sigma = _np.diag(_np.diag(sigma))
        try:
            components.append(Gauss(mu, sigma))
            if verbose:
                print('Diagonal covariance attempt succeeded.')
        except _np.linalg.LinAlgError as error2:
            skipped.append(idx)
            if verbose:
                print("Diagonal covariance attempt failed. Reason: %s" % repr(error2))
    # print skipped components if any
    if skipped:
        print("WARNING: Could not form Gaussians from: %s" % skipped)
    return MixtureDensity(components)
def resolve_method(state, method_name, class_name, params=(), ret_type=None, include_superclasses=True, init_class=True, raise_exception_if_not_found=False):
    """Resolve a method from its name, class and parameter list.

    The method may be defined in one of the superclasses of the given
    class (TODO: support interfaces); the class hierarchy is walked
    upwards until a match is found.

    :rtype: archinfo.arch_soot.SootMethodDescriptor
    """
    base_class = state.javavm_classloader.get_class(class_name)
    if include_superclasses:
        hierarchy = state.javavm_classloader.get_class_hierarchy(base_class)
    else:
        hierarchy = [base_class]
    java_binary = state.project.loader.main_object
    # Walk up the class hierarchy until the method is found.
    for class_descriptor in hierarchy:
        soot_method = java_binary.get_soot_method(method_name, class_descriptor.name, params, none_if_missing=True)
        if soot_method is None:
            continue
        if init_class:
            # Initialize the defining class before returning the method.
            state.javavm_classloader.init_class(class_descriptor)
        return SootMethodDescriptor.from_soot_method(soot_method)
    # Method could not be found => we are executing code that is not
    # loaded (typically library code). Fallback: continue with the info
    # available from the invocation, so SimProcedures can still be used.
    if raise_exception_if_not_found:
        raise SootMethodNotLoadedException()
    return SootMethodDescriptor(class_name, method_name, params, ret_type=ret_type)
def read_form_data(self):
    """Attempt to read the form data from the current request.

    Raises AlreadyProcessed when the form was handled before; does
    nothing for readonly forms, mismatched request methods, or when the
    form's hidden submit marker is absent.
    """
    if self.processed_data:
        raise exceptions.AlreadyProcessed('The data has already been processed for this form')
    if self.readonly:
        return
    if request.method != self.method:
        return
    data = request.form if self.method == 'POST' else request.args
    if self.submitted_hidden_input_name not in data:
        # The form has not been submitted.
        return
    self.processed_data = True
    for field in self.all_fields:
        # Readonly fields are never populated from the request.
        if field.readonly:
            continue
        field.extract_value(data)
        # Validate the field
        if not field.validate():
            log.debug('Validation error in field \'%s\': %s' % (field.name, field.error))
            self.has_errors = True
def dicom2db(file_path, file_type, is_copy, step_id, db_conn, sid_by_patient=False, pid_in_vid=False, visit_in_path=False, rep_in_path=False):
    """Extract some meta-data from a DICOM file and store in a DB.

    Arguments:
    :param file_path: File path.
    :param file_type: File type (should be 'DICOM').
    :param is_copy: Indicate if this file is a copy.
    :param step_id: Step ID.
    :param db_conn: Database connection.
    :param sid_by_patient: Rarely, a data set might use study IDs which are
        unique by patient (not for the whole study), e.g. LREN data. In such
        a case, enable this flag to use PatientID + StudyID as a session ID.
    :param pid_in_vid: Rarely, a data set might mix patient IDs and visit
        IDs, e.g. LREN data. Enabling this flag tries to split PatientID
        into VisitID and PatientID.
    :param visit_in_path: Enable to get the visit ID from the folder
        hierarchy instead of DICOM meta-data (e.g. useful for PPMI).
    :param rep_in_path: Enable to get the repetition ID from the folder
        hierarchy instead of DICOM meta-data (e.g. useful for PPMI).
    :return: A dictionary containing the following IDs: participant_id,
        visit_id, session_id, sequence_type_id, sequence_id, repetition_id,
        file_id.
    """
    global conn
    conn = db_conn
    tags = dict()
    logging.info("Extracting DICOM headers from '%s'" % file_path)
    try:
        dcm = dicom.read_file(file_path)
        dataset = db_conn.get_dataset(step_id)
        tags['participant_id'] = _extract_participant(dcm, dataset, pid_in_vid)
        if visit_in_path:
            tags['visit_id'] = _extract_visit_from_path(dcm, file_path, pid_in_vid, sid_by_patient, dataset, tags['participant_id'])
        else:
            tags['visit_id'] = _extract_visit(dcm, dataset, tags['participant_id'], sid_by_patient, pid_in_vid)
        tags['session_id'] = _extract_session(dcm, tags['visit_id'])
        tags['sequence_type_id'] = _extract_sequence_type(dcm)
        tags['sequence_id'] = _extract_sequence(tags['session_id'], tags['sequence_type_id'])
        if rep_in_path:
            tags['repetition_id'] = _extract_repetition_from_path(dcm, file_path, tags['sequence_id'])
        else:
            tags['repetition_id'] = _extract_repetition(dcm, tags['sequence_id'])
        tags['file_id'] = extract_dicom(file_path, file_type, is_copy, tags['repetition_id'], step_id)
    except InvalidDicomError:
        # BUGFIX: log the offending file path, not the step id.
        logging.warning("%s is not a DICOM file !" % file_path)
    except IntegrityError:
        # TODO: properly deal with concurrency problems
        logging.warning("A problem occurred with the DB ! A rollback will be performed...")
        conn.db_session.rollback()
    return tags
def integrate_storage(self, timeseries, position, **kwargs):
    """Integrates storage into grid.

    See :class:`~.grid.network.StorageControl` for more information.
    """
    # StorageControl wires itself into this edisgo object on construction;
    # the instance is intentionally not kept here.
    StorageControl(edisgo=self, timeseries=timeseries, position=position, **kwargs)
def add_root_match(self, lst_idx, root_idx):
    """Record a match between the elements at lst_idx and root_idx."""
    self.root_matches[lst_idx] = root_idx
    # Track matches whose list element is absent from the result set.
    if lst_idx not in self.in_result_idx:
        self.not_in_result_root_match_idx.add(lst_idx)
def reign_year_to_ad(reign_year: int, reign: int) -> int:
    """Convert a reign year of the Chakri dynasty (Thailand) to AD.

    :param reign_year: year within the reign
    :param reign: reign number (7, 8, 9 or 10 supported)
    :return: the Gregorian (AD) year
    :raises ValueError: if the reign is not supported
    """
    # Offset of each supported reign's epoch relative to AD.
    offsets = {10: 2015, 9: 1945, 8: 1928, 7: 1924}
    reign = int(reign)
    if reign not in offsets:
        # Previously an unknown reign fell through to UnboundLocalError.
        raise ValueError('Reign %d is not supported (expected 7-10)' % reign)
    return int(reign_year) + offsets[reign]
def get_token_issuer(token):
    """Return the issuer ('iss') claim of a signed JWT token.

    The issuer is the identifier used to recover the secret, so it must
    be extractable before the signature validation stage. This does NOT
    check the validity of the token.

    :param token: signed JWT token
    :return issuer: iss field of the JWT token
    :raises TokenIssuerError: if iss field not present
    :raises TokenDecodeError: if token does not conform to JWT spec
    """
    try:
        unverified_claims = decode_token(token)
        if 'iss' not in unverified_claims:
            raise TokenIssuerError
        return unverified_claims.get('iss')
    except jwt.DecodeError:
        raise TokenDecodeError
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    """Return the cosine similarity between each brand's followers and the exemplars.

    :param brands: iterable of ``(brand, followers)`` pairs
    :param exemplars: mapping of exemplar name -> follower collection
    :param weighted_avg: if True, average the per-exemplar similarities,
        weighting each exemplar inversely by its size
    :param sqrt: if True, return the square root of each score
    :return: dict mapping brand -> similarity score
    """
    exemplar_sets = list(exemplars.values())
    scores = {}
    for brand, followers in brands:
        sims = [_cosine(followers, others) for others in exemplar_sets]
        if weighted_avg:
            weights = [1. / len(others) for others in exemplar_sets]
            scores[brand] = np.average(sims, weights=weights)
        else:
            scores[brand] = 1. * sum(sims) / len(exemplars)
    if sqrt:
        scores = {b: math.sqrt(s) for b, s in scores.items()}
    return scores
def main_real(usercode, netobj, options):
    '''Entrypoint function for non-test ("real") mode.  At this point
    we assume that we are running as root and have pcap module.

    :param usercode: module name/path of the user's code to import
    :param netobj: network object providing shutdown()
    :param options: parsed CLI options (dryrun, codearg, nohandle, nopdb)
    '''
    usercode_entry_point = import_or_die(usercode, ('main', 'switchy_main'))
    if options.dryrun:
        log_info("Imported your code successfully. Exiting dry run.")
        netobj.shutdown()
        return
    try:
        _start_usercode(usercode_entry_point, netobj, options.codearg)
    except Exception as e:
        import traceback
        log_failure("Exception while running your code: {}".format(e))
        # BUG FIX: the wrapped exception text was passed to .format() but the
        # template had no {1} placeholder, so "repeating what's above" was
        # followed by nothing.  Include it explicitly.
        message = '''{0}
This is the Switchyard equivalent of the blue screen of death.
Here (repeating what's above) is the failure that occurred:

{1}
'''.format('*' * 60, textwrap.fill(str(e), 60))
        with red():
            print(message)
            traceback.print_exc(1)
            print('*' * 60)
        if options.nohandle:
            raise
        if not options.nopdb:
            print('''
I'm throwing you into the Python debugger (pdb) at the point of failure.
If you don't want pdb, use the --nopdb flag to avoid this fate.
''')
            import pdb
            pdb.post_mortem()
    else:
        netobj.shutdown()
def conf_budget(self, budget):
    """Set a limit on the number of conflicts for the MiniCard solver."""
    if not self.minicard:
        # No underlying solver instance; nothing to configure.
        return
    pysolvers.minicard_cbudget(self.minicard, budget)
def prod(self, axis=None, skipna=None, level=None, numeric_only=None, min_count=0, **kwargs):
    """Return the product of the values for the requested axis.

    Args:
        axis: {index (0), columns (1)}
        skipna: boolean, default True
        level: int or level name, default None
        numeric_only: boolean, default None
        min_count: int, default 0

    Returns:
        prod: Series or DataFrame (if level specified)
    """
    resolved_axis = 0 if axis is None else self._get_axis_number(axis)
    validated = self._validate_dtypes_sum_prod_mean(resolved_axis, numeric_only, ignore_axis=True)
    reduced = validated._query_compiler.prod(
        axis=resolved_axis,
        skipna=skipna,
        level=level,
        numeric_only=numeric_only,
        min_count=min_count,
        **kwargs)
    return validated._reduce_dimension(reduced)
def calc_frequencies(genomes, bp_table, min_cov, min_per):
    """Compute per-position base frequencies, optionally printing them to a table.

    genomes = {}  # genomes[genome][contig][sample] = {'bp_stats': {}}

    :param genomes: nested mapping genome -> contig -> sample -> stats
    :param bp_table: False, or a path to write a tab-separated table to
    :param min_cov: minimum read support for a base call to be kept
    :param min_per: minimum percent support for a base call to be kept
    :return: genomes, with consensus stats stored per 1-based position
    """
    nucs = ['A', 'T', 'G', 'C', 'N']
    if bp_table is not False:
        bp_table = open(bp_table, 'w')
        header = ['#genome', 'contig', 'sample', 'position', 'reference',
                  'ref. frequency', 'consensus', 'con. frequency',
                  'A', 'T', 'G', 'C', 'N', '# insertions', '# deletions']
        print('\t'.join(header), file=bp_table)
    for genome, contigs in list(genomes.items()):
        for contig, samples in list(contigs.items()):
            for sample, stats in list(samples.items()):
                for pos, ps in enumerate(stats['bp_stats'], 1):
                    coverage = sum([ps[nuc] for nuc in nucs])
                    for nuc in nucs:
                        # make sure support for base passes thresholds
                        nuc_cov = ps[nuc]
                        if coverage == 0:
                            nuc_per = 0
                        else:
                            nuc_per = (float(nuc_cov) / coverage) * 100
                        if nuc_cov < min_cov or nuc_per < min_per:
                            del ps[nuc]
                    ps = find_consensus(ps)
                    genomes[genome][contig][sample][pos] = ps
                    if bp_table is not False:
                        out = [genome, contig, sample, pos]
                        for i in ['ref', 'reference frequency', 'consensus',
                                  'consensus frequency', 'A', 'T', 'G', 'C', 'N',
                                  'In', 'Del']:
                            # BUG FIX: was a bare ``except:`` that hid every
                            # error; only a missing key should yield 'n/a'.
                            try:
                                if i == 'consensus':
                                    out.append(ps[i][0])
                                elif i in ['In', 'Del']:
                                    out.append(len(ps[i]))
                                else:
                                    out.append(ps[i])
                            except KeyError:
                                out.append('n/a')
                        print('\t'.join([str(i) for i in out]), file=bp_table)
    return genomes
def p_constant_declaration ( p ) :
    'constant _ declaration : STRING EQUALS static _ scalar'
    # PLY grammar action: the docstring above IS the grammar rule consumed by
    # the parser generator, so it must not be edited.  Builds a
    # ConstantDeclaration AST node from the matched STRING (the constant name)
    # and static_scalar (its value), recording the source line of the name.
    p [ 0 ] = ast . ConstantDeclaration ( p [ 1 ] , p [ 3 ] , lineno = p . lineno ( 1 ) )
def from_s3_json(cls, bucket_name, key, json_path=None, key_mapping=None,
                 aws_profile=None, aws_access_key_id=None,
                 aws_secret_access_key=None, region_name=None):  # pragma: no cover
    """Load database credentials from a JSON file stored on S3.

    :param bucket_name: str, S3 bucket name
    :param key: str, S3 object key of the JSON file
    :param json_path: optional path within the JSON document
    :param key_mapping: optional mapping of JSON keys to credential fields
    :param aws_profile: if None, assume that you are using this from
        AWS cloud (a service on the same cloud doesn't need a profile name)
    :param aws_access_key_id: str, not recommended to use
    :param aws_secret_access_key: str, not recommended to use
    :param region_name: str
    """
    import boto3
    session = boto3.Session(
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=region_name,
        profile_name=aws_profile,
    )
    s3 = session.resource("s3")
    # Renamed from ``object`` to avoid shadowing the builtin.
    s3_object = s3.Bucket(bucket_name).Object(key)
    data = json.loads(s3_object.get()["Body"].read().decode("utf-8"))
    return cls._from_json_data(data, json_path, key_mapping)
def get_credentials(cmd_args):
    """Return the (email, api_key) pair used to connect to TagCube.

    Credential sources are tried in this order, returning the first match:
        * Command line arguments
        * Environment variables
        * Configuration file

    :raises ValueError: if no source provides credentials
    """
    cmd_credentials = cmd_args.email, cmd_args.key
    if cmd_credentials != (None, None):
        cli_logger.debug('Using command line configured credentials')
        return cmd_credentials
    env_email, env_api_key = get_config_from_env()
    if env_email is not None and env_api_key is not None:
        cli_logger.debug('Using environment configured credentials')
        return env_email, env_api_key
    cfg_email, cfg_api_key = parse_config_file()
    if cfg_email is not None and cfg_api_key is not None:
        cli_logger.debug('Using .tagcube file configured credentials')
        return cfg_email, cfg_api_key
    raise ValueError(NO_CREDENTIALS_ERROR)
def foldr(f, seq, default=_no_default):
    """Fold a function over a sequence with right associativity.

    Parameters
    ----------
    f : callable[any, any]
        The function to reduce the sequence with.  The first argument will be
        the element of the sequence; the second argument will be the
        accumulator.
    seq : iterable[any]
        The sequence to reduce.
    default : any, optional
        The starting value to reduce with.  If not provided, the sequence
        cannot be empty, and the last value of the sequence will be used.

    Returns
    -------
    folded : any
        The folded value.

    Notes
    -----
    Expands like ``f(seq[0], f(seq[1], ... f(seq[-1], default)))``.  For
    performance it is best to pass a strict (non-lazy) sequence, e.g. a list.

    See Also
    --------
    :func:`functools.reduce`
    :func:`sum`
    """
    # A right fold is a left fold over the reversed sequence with the
    # argument order of ``f`` flipped.
    if default is _no_default:
        return reduce(flip(f), reversed(seq))
    return reduce(flip(f), reversed(seq), default)
def send_pgrp(cls, sock, pgrp):
    """Send the PGRP chunk over the specified socket.

    :param sock: socket to write the chunk to
    :param pgrp: process-group id; must be an ``IntegerForPid`` and negative
    :raises ValueError: if ``pgrp`` is not a negative ``IntegerForPid``
    """
    # An ``assert`` would be stripped under ``python -O``; validate explicitly.
    if not (isinstance(pgrp, IntegerForPid) and pgrp < 0):
        raise ValueError("pgrp must be a negative IntegerForPid, got %r" % (pgrp,))
    encoded_int = cls.encode_int(pgrp)
    cls.write_chunk(sock, ChunkType.PGRP, encoded_int)
def delete_relationship(self, entity1_ilx: str, relationship_ilx: str, entity2_ilx: str) -> dict:
    """Delete a relationship connection in Interlex.

    A relationship exists as 3 different parts:
        1. entity with type term, cde, fde, or pde
        2. entity with type relationship that connects entity1 to entity2
           -> has its own meta data, so no value needed
        3. entity with type term, cde, fde, or pde

    :param entity1_ilx: ILX id of the first entity
    :param relationship_ilx: ILX id of the connecting relationship entity
    :param entity2_ilx: ILX id of the second entity
    :return: server response dict, or None if no matching relationship exists
    """
    entity1_data = self.get_entity(entity1_ilx)
    if not entity1_data['id']:
        # BUG FIX: previously concatenated the *dict* entity1_data into the
        # message, which raised TypeError; report the ilx string instead
        # (matching how relationship_ilx was already handled).
        exit('entity1_ilx: ' + entity1_ilx + ' does not exist')
    relationship_data = self.get_entity(relationship_ilx)
    if not relationship_data['id']:
        exit('relationship_ilx: ' + relationship_ilx + ' does not exist')
    entity2_data = self.get_entity(entity2_ilx)
    if not entity2_data['id']:
        # BUG FIX: same str + dict TypeError as for entity1 above.
        exit('entity2_ilx: ' + entity2_ilx + ' does not exist')
    data = {
        'term1_id': ' ',  # entity1_data['id'],
        'relationship_tid': ' ',  # relationship_data['id'],
        'term2_id': ' ',  # entity2_data['id'],
        'term1_version': entity1_data['version'],
        'term2_version': entity2_data['version'],
        'relationship_term_version': relationship_data['version'],
        'orig_uid': self.user_id,  # BUG: php lacks orig_uid update
    }
    entity_relationships = self.get_relationship_via_tid(entity1_data['id'])
    # TODO: parse through entity_relationships to see if we have a match; else print warning and return None
    relationship_id = None
    for relationship in entity_relationships:
        if (str(relationship['term1_id']) == str(entity1_data['id'])
                and str(relationship['term2_id']) == str(entity2_data['id'])
                and str(relationship['relationship_tid']) == str(relationship_data['id'])):
            relationship_id = relationship['id']
            break
    if not relationship_id:
        print('''WARNING: Annotation you wanted to delete does not exist ''')
        return None
    url = self.base_url + 'term/edit-relationship/{id}'.format(id=relationship_id)
    output = self.post(url=url, data=data, )
    return output
def _ping_loop(self):
    """Background task: send a PING to the server at the requested interval
    and abort the connection if no PONG arrived during the previous interval.
    """
    # Pretend the previous PING was answered so the first iteration sends one.
    self.pong_received = True
    self.ping_loop_event.clear()
    while self.state == 'connected':
        if not self.pong_received:
            # A full ping interval elapsed with no PONG: treat the connection
            # as dead, close the websocket, and wake the queue consumer.
            self.logger.info('PONG response has not been received, aborting')
            if self.ws:
                self.ws.close()
            # ``None`` is the end-of-stream sentinel for the packet queue.
            self.queue.put(None)
            break
        self.pong_received = False
        self._send_packet(packet.Packet(packet.PING))
        # Sleep for the ping interval, but allow an early wake-up via the event.
        self.ping_loop_event.wait(timeout=self.ping_interval)
    self.logger.info('Exiting ping task')
def _convert(self, dictlike):
    """Validate and convert a dict-like object into values for set()ing.

    This is called behind the scenes when a MappedCollection is replaced
    entirely by another collection, as in::

        myobj.mappedcollection = {'a': obj1, 'b': obj2}  # ...

    Raises a TypeError if the key in any (key, value) pair in the dictlike
    object does not match the key that this collection's keyfunc would
    have assigned for that value.
    """
    for incoming_key, valuelist in util.dictlike_iteritems(dictlike):
        for value in valuelist:
            expected_key = self.keyfunc(value)
            if incoming_key != expected_key:
                raise TypeError(
                    "Found incompatible key %r for value %r; this "
                    "collection's "
                    "keying function requires a key of %r for this value." % (
                        incoming_key, value, expected_key))
            yield value
def _do_extraction(df, column_id, column_value, column_kind, default_fc_parameters, kind_to_fc_parameters, n_jobs, chunk_size, disable_progressbar, distributor):
    """Wrapper around _do_extraction_on_chunk, calling it on all chunks in the data frame.

    A chunk is a subset of the data with a given kind and id - a single time
    series.  The data is separated into those single time series and
    _do_extraction_on_chunk is called on each; the results are combined into a
    single pandas DataFrame.  The call runs in parallel or not, with or
    without a progress bar, depending on the given flags.

    :param df: The dataframe in the normalized format used for extraction.
    :type df: pd.DataFrame
    :param default_fc_parameters: mapping from feature calculator names to
        parameters; only names that are keys in this dict will be calculated.
    :type default_fc_parameters: dict
    :param kind_to_fc_parameters: mapping from kind names to objects like
        default_fc_parameters, used instead of it for that kind.
    :type kind_to_fc_parameters: dict
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_kind: The name of the column recording the kind of the value.
    :type column_kind: str
    :param column_value: The name of the column keeping the value itself.
    :type column_value: str
    :param chunk_size: The size of one chunk for the parallelization.
    :type chunk_size: None or int
    :param n_jobs: Number of processes for parallelization; 0 disables it.
    :type n_jobs: int
    :param disable_progressbar: Do not show a progressbar during calculation.
    :type disable_progressbar: bool
    :param distributor: Advanced parameter; leave None to let TSFresh choose.
    :type distributor: DistributorBaseClass
    :return: the extracted features
    :rtype: pd.DataFrame
    """
    data_in_chunks = generate_data_chunk_format(df, column_id, column_kind, column_value)
    if distributor is None:
        if n_jobs == 0:
            distributor = MapDistributor(disable_progressbar=disable_progressbar,
                                         progressbar_title="Feature Extraction")
        else:
            distributor = MultiprocessingDistributor(n_workers=n_jobs,
                                                     disable_progressbar=disable_progressbar,
                                                     progressbar_title="Feature Extraction")
    if not isinstance(distributor, DistributorBaseClass):
        raise ValueError("the passed distributor is not an DistributorBaseClass object")
    kwargs = dict(default_fc_parameters=default_fc_parameters,
                  kind_to_fc_parameters=kind_to_fc_parameters)
    result = distributor.map_reduce(_do_extraction_on_chunk, data=data_in_chunks,
                                    chunk_size=chunk_size, function_kwargs=kwargs)
    distributor.close()
    # Return a dataframe in the typical form (id as index and feature names as columns)
    result = pd.DataFrame(result)
    # BUG FIX: ``Index.contains`` was removed from the pandas API;
    # membership testing is the supported spelling.
    if "value" in result.columns:
        result["value"] = result["value"].astype(float)
    if len(result) != 0:
        # Keyword arguments: positional ``pivot`` args were removed in pandas 2.0.
        result = result.pivot(index="id", columns="variable", values="value")
        result.index = result.index.astype(df[column_id].dtype)
    return result
def parse_arguments(argv):
    """Parse the command line arguments.

    :param argv: full argv list (argv[0] is the program name and is skipped)
    :return: argparse.Namespace with an extra ``layer_sizes`` list collected
        from the variable ``--layer-sizeN`` arguments
    """
    parser = argparse.ArgumentParser(
        description=('Train a regression or classification model. Note that if '
                     'using a DNN model, --layer-size1=NUM, --layer-size2=NUM, '
                     'should be used. '))
    # I/O file parameters
    parser.add_argument('--train-data-paths', type=str, action='append', required=True)
    parser.add_argument('--eval-data-paths', type=str, action='append', required=True)
    parser.add_argument('--job-dir', type=str, required=True)
    parser.add_argument('--preprocess-output-dir', type=str, required=True,
                        help=('Output folder of preprocessing. Should contain the'
                              ' schema file, and numerical stats and vocab files.'
                              ' Path must be on GCS if running'
                              ' cloud training.'))
    parser.add_argument('--transforms-file', type=str, required=True,
                        help=('File describing the the transforms to apply on '
                              'each column'))
    # HP parameters
    parser.add_argument('--learning-rate', type=float, default=0.01,
                        help='tf.train.AdamOptimizer learning rate')
    parser.add_argument('--epsilon', type=float, default=0.0005,
                        help='tf.train.AdamOptimizer epsilon')
    # --layer_size See below
    # Model problems
    parser.add_argument('--model-type',
                        choices=['linear_classification', 'linear_regression',
                                 'dnn_classification', 'dnn_regression'],
                        required=True)
    parser.add_argument('--top-n', type=int, default=1,
                        help=('For classification problems, the output graph '
                              'will contain the labels and scores for the top '
                              'n classes.'))
    # Training input parameters
    parser.add_argument('--max-steps', type=int, default=5000,
                        help='Maximum number of training steps to perform.')
    parser.add_argument('--num-epochs', type=int,
                        help=('Maximum number of training data epochs on which '
                              'to train. If both --max-steps and --num-epochs '
                              'are specified, the training job will run for '
                              '--max-steps or --num-epochs, whichever occurs '
                              'first. If unspecified will run for --max-steps.'))
    parser.add_argument('--train-batch-size', type=int, default=1000)
    parser.add_argument('--eval-batch-size', type=int, default=1000)
    parser.add_argument('--min-eval-frequency', type=int, default=100,
                        help=('Minimum number of training steps between '
                              'evaluations'))
    # other parameters
    parser.add_argument('--save-checkpoints-secs', type=int, default=600,
                        help=('How often the model should be checkpointed/saved '
                              'in seconds'))
    args, remaining_args = parser.parse_known_args(args=argv[1:])
    # All HP parameters must be unique, so we need to support an unknown number
    # of --layer_size1=10 --layer_size2=10 ...
    # Look at remaining_args for layer_size\d+ to get the layer info.
    # BUG FIX: the pattern is now a raw string; '\d' in a plain string is an
    # invalid escape sequence (DeprecationWarning, SyntaxError in the future).
    pattern = re.compile(r'layer-size(\d+)')
    num_layers = 0
    for other_arg in remaining_args:
        match = pattern.search(other_arg)
        if match:
            num_layers = max(num_layers, int(match.group(1)))
    # Build a new parser so we catch unknown args and missing layer_sizes.
    parser = argparse.ArgumentParser()
    for i in range(num_layers):
        parser.add_argument('--layer-size%s' % str(i + 1), type=int, required=True)
    layer_args = vars(parser.parse_args(args=remaining_args))
    layer_sizes = []
    for i in range(num_layers):
        key = 'layer_size%s' % str(i + 1)
        layer_sizes.append(layer_args[key])
    assert len(layer_sizes) == num_layers
    args.layer_sizes = layer_sizes
    return args
def _on_closed(self):
    """Invoked by connections when they are closed.

    Clears the connected flag, then either notifies the registered close
    callback or raises if the close was unexpected.
    """
    self._connected.clear()
    if not self._closing:
        if self._on_close_callback:
            self._on_close_callback()
        else:
            # NOTE(review): indentation was ambiguous in the flattened source;
            # this reads the ``else`` as belonging to the inner ``if`` — i.e.
            # an *unexpected* close with no callback registered raises.
            # Confirm against upstream history.
            raise exceptions.ConnectionError('closed')
def assert_true(expr, msg_fmt="{msg}"):
    """Fail the test unless the expression is truthy.

    >>> assert_true("Hello World!")
    >>> assert_true("")
    Traceback (most recent call last):
        ...
    AssertionError: '' is not truthy

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * expr - tested expression
    """
    if expr:
        return
    default_msg = "{!r} is not truthy".format(expr)
    fail(msg_fmt.format(msg=default_msg, expr=expr))
def _cast_to_stata_types(data):
    """Check the dtypes of the columns of a pandas DataFrame for
    compatibility with the data types and ranges supported by Stata, and
    convert if necessary.

    Parameters
    ----------
    data : DataFrame
        The DataFrame to check and convert

    Notes
    -----
    Numeric columns in Stata must be one of int8, int16, int32, float32 or
    float64, with some additional value restrictions.  int8 and int16 columns
    are checked for violations of the value restrictions and upcast if needed.
    int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger
    than this range.  If the int64 values are outside of the range of those
    perfectly representable as float64 values, a warning is raised.  bool
    columns are cast to int8.  uint columns are converted to int of the same
    size if there is no loss in precision, otherwise upcast to a larger type.
    uint64 is currently not supported since it is converted to object in a
    DataFrame.
    """
    ws = ''
    # (original dtype, target if values fit the small type, target if large).
    # BUG FIX: ``np.bool`` (alias of the builtin) was removed in NumPy >= 1.24;
    # ``np.bool_`` is the supported scalar type and compares equal to the
    # same dtypes.
    conversion_data = ((np.bool_, np.int8, np.int8),
                       (np.uint8, np.int8, np.int16),
                       (np.uint16, np.int16, np.int32),
                       (np.uint32, np.int32, np.int64))
    # Largest values exactly representable in Stata's float/double formats.
    float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
    float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
    for col in data:
        dtype = data[col].dtype
        # Cast from unsupported types to supported types
        for c_data in conversion_data:
            if dtype == c_data[0]:
                if data[col].max() <= np.iinfo(c_data[1]).max:
                    dtype = c_data[1]
                else:
                    dtype = c_data[2]
                if c_data[2] == np.float64:  # Warn if necessary
                    if data[col].max() >= 2 ** 53:
                        ws = precision_loss_doc % ('uint64', 'float64')
                data[col] = data[col].astype(dtype)
        # Check values and upcast if necessary
        if dtype == np.int8:
            if data[col].max() > 100 or data[col].min() < -127:
                data[col] = data[col].astype(np.int16)
        elif dtype == np.int16:
            if data[col].max() > 32740 or data[col].min() < -32767:
                data[col] = data[col].astype(np.int32)
        elif dtype == np.int64:
            if (data[col].max() <= 2147483620 and data[col].min() >= -2147483647):
                data[col] = data[col].astype(np.int32)
            else:
                data[col] = data[col].astype(np.float64)
                if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
                    ws = precision_loss_doc % ('int64', 'float64')
        elif dtype in (np.float32, np.float64):
            value = data[col].max()
            if np.isinf(value):
                raise ValueError('Column {col} has a maximum value of '
                                 'infinity which is outside the range '
                                 'supported by Stata.'.format(col=col))
            if dtype == np.float32 and value > float32_max:
                data[col] = data[col].astype(np.float64)
            elif dtype == np.float64:
                if value > float64_max:
                    raise ValueError('Column {col} has a maximum value '
                                     '({val}) outside the range supported by '
                                     'Stata ({float64_max})'.format(col=col, val=value, float64_max=float64_max))
    if ws:
        warnings.warn(ws, PossiblePrecisionLoss)
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.