signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def write_tar(
    src_fs,  # type: FS
    file,  # type: Union[Text, BinaryIO]
    compression=None,  # type: Optional[Text]
    encoding="utf-8",  # type: Text
    walker=None,  # type: Optional[Walker]
):
    # type: (...) -> None
    """Write the contents of a filesystem to a tar file.

    Arguments:
        src_fs (FS): Source filesystem whose contents are archived.
        file (str or io.IOBase): Destination file, may be a file
            name or an open file object.
        compression (str, optional): Compression to use, or `None`
            for a plain Tar archive without compression.
        encoding (str): The encoding to use for filenames. The
            default is ``"utf-8"``. Only used on Python 2.
        walker (~fs.walk.Walker, optional): A `Walker` instance, or
            `None` to use default walker. You can use this to specify
            which files you want to compress.
    """
    # Map pyfilesystem resource types onto tarfile member type codes.
    type_map = {
        ResourceType.block_special_file: tarfile.BLKTYPE,
        ResourceType.character: tarfile.CHRTYPE,
        ResourceType.directory: tarfile.DIRTYPE,
        ResourceType.fifo: tarfile.FIFOTYPE,
        ResourceType.file: tarfile.REGTYPE,
        ResourceType.socket: tarfile.AREGTYPE,  # no type for socket
        ResourceType.symlink: tarfile.SYMTYPE,
        ResourceType.unknown: tarfile.AREGTYPE,  # no type for unknown
    }
    # (TarInfo attribute, fs Info attribute) pairs copied verbatim when present.
    tar_attr = [("uid", "uid"), ("gid", "gid"), ("uname", "user"), ("gname", "group")]
    mode = "w:{}".format(compression or "")
    if isinstance(file, (six.text_type, six.binary_type)):
        # A path was given: let tarfile open/close the file itself.
        _tar = tarfile.open(file, mode=mode)
    else:
        # An already-open binary stream was given.
        _tar = tarfile.open(fileobj=file, mode=mode)
    current_time = time.time()
    walker = walker or Walker()
    with _tar:
        gen_walk = walker.info(src_fs, namespaces=["details", "stat", "access"])
        for path, info in gen_walk:
            # Tar names must be relative
            tar_name = relpath(path)
            if not six.PY3:
                # Python2 expects bytes filenames
                tar_name = tar_name.encode(encoding, "replace")
            tar_info = tarfile.TarInfo(tar_name)
            # Prefer the raw stat mtime; fall back to the details namespace,
            # then to "now" if the filesystem reports no timestamp at all.
            if info.has_namespace("stat"):
                mtime = info.get("stat", "st_mtime", current_time)
            else:
                mtime = info.modified or current_time
            if isinstance(mtime, datetime):
                mtime = datetime_to_epoch(mtime)
            if isinstance(mtime, float):
                mtime = int(mtime)
            tar_info.mtime = mtime
            for tarattr, infoattr in tar_attr:
                if getattr(info, infoattr, None) is not None:
                    setattr(tar_info, tarattr, getattr(info, infoattr, None))
            if info.has_namespace("access"):
                # NOTE(review): 0o420 is an unusual default permission mask
                # (r---w----) -- confirm it is intentional and not 0o644.
                tar_info.mode = getattr(info.permissions, "mode", 0o420)
            if info.is_dir:
                # Directories carry no payload.
                tar_info.type = tarfile.DIRTYPE
                _tar.addfile(tar_info)
            else:
                tar_info.type = type_map.get(info.type, tarfile.REGTYPE)
                tar_info.size = info.size
                # Stream the file contents straight from the source filesystem.
                with src_fs.openbin(path) as bin_file:
                    _tar.addfile(tar_info, bin_file)
def down(self, point):
    """Record the cursor position at the start of a drag and, when axis
    constraining is enabled, pick and apply the nearest constraint axis."""
    self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
    self._qdown = self._qpre = self._qnow
    if not (self._constrain and self._axes is not None):
        self._axis = None
        return
    self._axis = arcball_nearest_axis(self._vdown, self._axes)
    self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
def run_thread(self, usnap=.2, daemon=True):
    """Start ``unpack_data`` in a background thread.

    :param usnap: seconds between reads, forwarded to ``unpack_data``
    :param daemon: whether the worker thread is a daemon thread
    """
    # BUG FIX: the original passed ``args={usnap: usnap}`` -- a dict literal
    # keyed by the *value* of usnap -- which only worked by accident because
    # Thread unpacks the dict's keys as positional arguments.  Pass the
    # argument by keyword explicitly instead.
    try:
        gps3_data_thread = Thread(target=self.unpack_data,
                                  kwargs={'usnap': usnap}, daemon=daemon)
    except TypeError:
        # threading.Thread() only accepts the daemon argument in Python >= 3.3
        gps3_data_thread = Thread(target=self.unpack_data,
                                  kwargs={'usnap': usnap})
        gps3_data_thread.setDaemon(daemon)
    gps3_data_thread.start()
def is_reserved_ip(self, ip):
    """Check if the given ip address is in a reserved ipv4 address space.

    :param ip: ip address
    :return: boolean
    """
    addr = ipaddress(ip)
    return any(addr in ipnetwork(mask) for mask in self._reserved_netmasks)
def rpc_export(rpc_method_name, sync=False):
    """Export a function or plugin method as a msgpack-rpc request handler."""
    def decorator(func):
        # Attach the metadata the host inspects when binding RPC handlers.
        func._nvim_rpc_method_name = rpc_method_name
        func._nvim_rpc_sync = sync
        func._nvim_bind = True
        func._nvim_prefix_plugin_path = False
        return func
    return decorator
def sender(self):
    """:returns: A :class:`~okcupyd.profile.Profile` instance belonging
                 to the sender of this message."""
    # Outgoing messages are tagged with a 'from_me' CSS class.
    if 'from_me' in self._message_element.attrib['class']:
        return self._message_thread.user_profile
    return self._message_thread.correspondent_profile
def sort_magic_data(magic_data, sort_name):
    '''Group magic_data records by a header column (like er_specimen_name).

    :param magic_data: iterable of record dicts
    :param sort_name: key to group by
    :return: dict mapping each distinct value of ``sort_name`` to the list of
        records carrying that value, preserving input order
    '''
    magic_data_sorted = {}
    for rec in magic_data:
        # setdefault replaces the original O(n) ``name in list(d.keys())``
        # membership scan with a single O(1) lookup.
        magic_data_sorted.setdefault(rec[sort_name], []).append(rec)
    return magic_data_sorted
def _PrintSessionsOverview(self, storage_reader):
    """Prints a sessions overview.

    Writes one table row per stored session: its identifier rendered as a
    canonical UUID string plus the session start time in ISO format.

    Args:
      storage_reader (StorageReader): storage reader.
    """
    table_view = views.ViewsFactory.GetTableView(self._views_format_type, title='Sessions')
    for session in storage_reader.GetSessions():
        start_time = timelib.Timestamp.CopyToIsoFormat(session.start_time)
        # session.identifier is a hex string; normalize it via uuid.UUID.
        session_identifier = uuid.UUID(hex=session.identifier)
        session_identifier = '{0!s}'.format(session_identifier)
        table_view.AddRow([session_identifier, start_time])
    table_view.Write(self._output_writer)
def generate_api_gateway():
    """Create the Blockade API Gateway REST service (idempotent: returns the
    existing gateway when one with the configured name is already present)."""
    logger.debug("[#] Setting up the API Gateway")
    client = boto3.client('apigateway', region_name=PRIMARY_REGION)
    existing = [item for item in client.get_rest_apis().get('items', list())
                if item['name'] == API_GATEWAY]
    if len(existing) > 0:
        logger.debug("[#] API Gateway already setup")
        return existing.pop()
    response = client.create_rest_api(name=API_GATEWAY,
                                      description='REST-API to power the Blockade service')
    logger.info("[#] Successfully setup the API Gateway")
    return response
def append(self, entry):
    """Append an entry to self"""
    if self.is_appendable(entry):
        self.data += entry.data
    else:
        raise ValueError('entry not appendable')
def p_expr(self, p):
    """expr : assignment_expr
            | expr COMMA assignment_expr"""
    # NOTE: the docstring above IS the grammar production -- PLY parses it to
    # build the parser tables, so its content must not be reworded.
    if len(p) == 2:
        # Single assignment_expr: propagate the node unchanged.
        p[0] = p[1]
    else:
        # Comma expression: p[1] is the left operand, p[3] the right
        # (p[2] is the COMMA token itself).
        p[0] = ast.Comma(left=p[1], right=p[3])
def get_view_names(engine: Engine) -> List[str]:
    """Returns a list of database view names from the :class:`Engine`."""
    return Inspector.from_engine(engine).get_view_names()
def transformToNative(obj):
    """Turn a recurring Component into a RecurringComponent."""
    if obj.isNative:
        return obj
    # Swap the class in place; going through object.__setattr__ bypasses any
    # __setattr__ override on the component.
    object.__setattr__(obj, '__class__', RecurringComponent)
    obj.isNative = True
    return obj
def from_date(self, value: date) -> datetime:
    """Initializes from the given date value, storing it as a midnight
    datetime and returning the stored value."""
    assert isinstance(value, date)
    self.value = datetime(value.year, value.month, value.day)
    return self.value
def exists(self, pattern, **match_kwargs):
    """Check if image exists in screen

    Returns:
        The FindPoint when the pattern is present, otherwise None (also
        None when result.confidence < self.image_match_threshold).
    """
    found = self.match(pattern, **match_kwargs)
    if found is None or not found.matched:
        return None
    return found
def get_task_cls(cls, name):
    """Returns an unambiguous class or raises an exception."""
    registered = cls._get_reg().get(name)
    if not registered:
        raise TaskClassNotFoundException(cls._missing_task_msg(name))
    if registered == cls.AMBIGUOUS_CLASS:
        raise TaskClassAmbigiousException('Task %r is ambiguous' % name)
    return registered
def main(args=sys.argv):
    """main entry point for the manifest CLI"""
    if len(args) < 2:
        return usage("Command expected")
    command, rest = args[1], args[2:]
    # Any unambiguous prefix of a command is accepted ("c", "cr", ...).
    if "create".startswith(command):
        return cli_create(rest)
    if "query".startswith(command):
        return cli_query(rest)
    if "verify".startswith(command):
        return cli_verify(rest)
    return usage("Unknown command: %s" % command)
async def queryone(self, stmt, *args):
    """Query for exactly one result.

    Raises NoResultError if there are no results, or ValueError if
    there are more than one.
    """
    rows = await self.query(stmt, *args)
    if not rows:
        raise NoResultError()
    if len(rows) > 1:
        raise ValueError("Expected 1 result, got %d" % len(rows))
    return rows[0]
def clear_terminal(self):
    """Reimplement ShellBaseWidget method"""
    self.clear()
    # Continuation prompt (p2) while a statement is open, else primary (p1).
    prompt = self.interpreter.p2 if self.interpreter.more else self.interpreter.p1
    self.new_prompt(prompt)
def _reduce_opacity(self):
    """Reduce opacity for watermark image."""
    # Work on an RGBA copy so the original image object is never mutated.
    if self.image.mode == 'RGBA':
        image = self.image.copy()
    else:
        image = self.image.convert('RGBA')
    # Scale the alpha channel by self.opacity, then write it back.
    alpha = ImageEnhance.Brightness(image.split()[3]).enhance(self.opacity)
    image.putalpha(alpha)
    self.image = image
def rms(self, stride=1):
    """Calculate the root-mean-square value of this `TimeSeries`
    once per stride.

    Parameters
    ----------
    stride : `float`
        stride (seconds) between RMS calculations

    Returns
    -------
    rms : `TimeSeries`
        a new `TimeSeries` containing the RMS value with dt=stride
    """
    # Samples per stride, and how many whole strides fit in the series
    # (a trailing partial stride is silently dropped).
    stridesamp = int(stride * self.sample_rate.value)
    nsteps = int(self.size // stridesamp)
    # stride through TimeSeries, recording RMS
    data = numpy.zeros(nsteps)
    for step in range(nsteps):
        # find step TimeSeries
        idx = int(stridesamp * step)
        idx_end = idx + stridesamp
        stepseries = self[idx:idx_end]
        # RMS of the chunk: sqrt of the mean squared magnitude.
        rms_ = numpy.sqrt(numpy.mean(numpy.abs(stepseries.value) ** 2))
        data[step] = rms_
    name = '%s %.2f-second RMS' % (self.name, stride)
    # One output sample per stride, hence sample_rate = 1/stride.
    return self.__class__(data, channel=self.channel, t0=self.t0, name=name,
                          sample_rate=(1 / float(stride)))
def provideObjectsToLearn(self, objectNames=None):
    """Returns the objects in a canonical format to be sent to an experiment.

    The returned value is a dictionary whose keys are object names and whose
    values are lists of sensations; every sensation maps a cortical column
    index to a (location SDR, feature SDR) pair.

    Parameters:
    @param objectNames (list)
           List of object names to provide to the experiment; defaults to
           every known object.
    """
    if objectNames is None:
        objectNames = self.objects.keys()
    # Replicate each (location, feature) pair across all cortical columns.
    objects = {
        name: [self._getSDRPairs([pair] * self.numColumns)
               for pair in self.objects[name]]
        for name in objectNames
    }
    self._checkObjectsToLearn(objects)
    return objects
def register():
    """Return dictionary of transform factories"""
    registry = {}
    for key in ('css', 'css-all', 'tag', 'text'):
        registry[key] = bake_html(key)
    registry['xpath'] = bake_parametrized(xpath_selector, select_all=False)
    registry['xpath-all'] = bake_parametrized(xpath_selector, select_all=True)
    return registry
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n',
                     start_label=0, unknown_token=None):
    """Encode sentences and (optionally) build a mapping
    from string tokens to integer indices. Unknown keys
    will be added to vocabulary.

    Parameters
    ----------
    sentences : list of list of str
        A list of sentences to encode. Each sentence
        should be a list of string tokens.
    vocab : None or dict of str -> int
        Optional input vocabulary. When ``None``, a new vocabulary is built,
        seeded with ``{invalid_key: invalid_label}``.
    invalid_label : int, default -1
        Index for invalid token, like <end-of-sentence>.
    invalid_key : str, default '\\n'
        Key for invalid token. Use '\\n' for end
        of sentence by default.
    start_label : int
        Lowest index assigned to newly added tokens.
    unknown_token : str
        Symbol to represent unknown token.
        If not specified, an unknown word with a fixed vocabulary raises
        an AssertionError.

    Returns
    -------
    result : list of list of int
        encoded sentences
    vocab : dict of str -> int
        result vocabulary
    """
    idx = start_label
    if vocab is None:
        vocab = {invalid_key: invalid_label}
        new_vocab = True
    else:
        new_vocab = False
    res = []
    for sent in sentences:
        coded = []
        for word in sent:
            if word not in vocab:
                assert (new_vocab or unknown_token), "Unknown token %s" % word
                # Never hand out the reserved invalid_label.
                if idx == invalid_label:
                    idx += 1
                if unknown_token:
                    word = unknown_token
                # BUG FIX: only assign a fresh index when the (possibly
                # substituted) token is still absent; the original wrote
                # ``vocab[word] = idx`` unconditionally, clobbering an
                # existing index for ``unknown_token`` in a supplied vocab.
                if word not in vocab:
                    vocab[word] = idx
                    idx += 1
            coded.append(vocab[word])
        res.append(coded)
    return res, vocab
def cancel(self, block=True):
    """Cancel a call to consume() happening in another thread

    This could take up to DashiConnection.consumer_timeout to complete.

    @param block: if True, waits until the consumer has returned
    """
    consumer = self._consumer
    if consumer:
        consumer.cancel(block=block)
def date(self) -> Optional[DateHeader]:
    """The ``Date`` header."""
    try:
        headers = self[b'date']
        return cast(DateHeader, headers[0])
    except (KeyError, IndexError):
        # Header absent (KeyError) or present but empty (IndexError).
        return None
def map_with_slider(h3: HistogramND, *, show_zero: bool = True, show_values: bool = False, **kwargs) -> dict:
    """Heatmap showing slice in first two dimensions, third dimension represented as a slider.

    Parameters
    ----------
    h3 : HistogramND
        A (at least) three-dimensional histogram; axes 0/1 are plotted, axis 2
        is bound to the slider.
    show_zero : bool
        Whether empty bins get a rect mark at all.
    show_values : bool
        Whether each bin is annotated with its formatted value.
    kwargs : dict
        Remaining options are consumed by the various _create_* helpers.
    """
    vega = _create_figure(kwargs)
    # Bin contents, optionally converted to density / cumulative form.
    values_arr = get_data(h3, kwargs.pop("density", None), kwargs.pop("cumulative", None))
    values = values_arr.tolist()
    value_format = get_value_format(kwargs.pop("value_format", None))
    _add_title(h3, vega, kwargs)
    _create_scales(h3, vega, kwargs)
    _create_axes(h3, vega, kwargs)
    _create_cmap_scale(values_arr, vega, kwargs)
    _create_colorbar(vega, kwargs)
    # Bin centres and edges of the two plotted axes.
    x = h3.get_bin_centers(0)
    y = h3.get_bin_centers(1)
    x1 = h3.get_bin_left_edges(0)
    x2 = h3.get_bin_right_edges(0)
    y1 = h3.get_bin_left_edges(1)
    y2 = h3.get_bin_right_edges(1)
    # Flatten the 3-D bin grid into one record per bin; "k" tags the slice.
    data = []
    for i in range(h3.shape[0]):
        for j in range(h3.shape[1]):
            for k in range(h3.shape[2]):
                if not show_zero and values[i][j][k] == 0:
                    continue
                item = {
                    "x": float(x[i]),
                    "x1": float(x1[i]),
                    "x2": float(x2[i]),
                    "y": float(y[j]),
                    "y1": float(y1[j]),
                    "y2": float(y2[j]),
                    "k": k,
                    "c": float(values[i][j][k]),
                }
                if show_values:
                    item["label"] = value_format(values[i][j][k])
                data.append(item)
    # The slider drives the "k" signal; the data transform below filters the
    # table to the currently selected slice.
    vega["signals"] = [{"name": "k", "value": h3.shape[2] // 2,
                        "bind": {"input": "range", "min": 0, "max": h3.shape[2] - 1, "step": 1,
                                 "name": (h3.axis_names[2] or "axis2") + " [slice]"}}]
    vega["data"] = [{"name": "table", "values": data,
                     "transform": [{"type": "filter", "expr": "k == datum.k", }]}]
    vega["marks"] = [{"type": "rect", "from": {"data": "table"},
                      "encode": {"enter": {"x": {"scale": "xscale", "field": "x1"},
                                           "x2": {"scale": "xscale", "field": "x2"},
                                           "y": {"scale": "yscale", "field": "y1"},
                                           "y2": {"scale": "yscale", "field": "y2"},
                                           "fill": {"scale": "color", "field": "c"},
                                           "stroke": {"value": 0},
                                           # "strokeWidth": {"value": 0},
                                           # "fillColor": {"value": "#ffff00"}
                                           },
                                 # "update": {
                                 #     "fillOpacity": {"value": 0.6}
                                 # "hover": {
                                 #     "fillOpacity": {"value": 0.5}
                                 }}]
    if show_values:
        vega["marks"].append({"type": "text", "from": {"data": "table"},
                              "encode": {"enter": {"align": {"value": "center"},
                                                   "baseline": {"value": "middle"},
                                                   "fontSize": {"value": 13},
                                                   "fontWeight": {"value": "bold"},
                                                   "text": {"field": "label"},
                                                   "x": {"scale": "xscale", "field": "x"},
                                                   "y": {"scale": "yscale", "field": "y"}, }}})
    return vega
def _handle_uniqueness(self):
    """Checks marked as unique and unique_together fields of the Model at each
    creation and update, and if it violates the uniqueness raises IntegrityError.

    First, looks at the fields which marked as "unique". If Model's unique fields
    did not change, it means that there is still a record at db with same unique
    field values. So, it must be checked that if more than one result violates the
    uniqueness. If it is, raise an IntegrityError. Otherwise, when marked as unique
    fields in the list of changed fields, it must be checked that if exists any
    violation instead of more than one. And, if it is, again raise an IntegrityError.

    Then, looks at the fields which marked as "unique_together" with the same logic.

    Raises:
        IntegrityError if unique and unique_together checks does not pass
    """
    def _getattr(u):
        # Prefer the raw stored field value; fall back to attribute access
        # (relation fields may surface as callables there).
        try:
            return self._field_values[u]
        except KeyError:
            return getattr(self, u)
    if self._uniques:
        for u in self._uniques:
            val = _getattr(u)
            changed_fields = self.changed_fields(from_db=True)
            # Callable values are relations, tracked in changed_fields as
            # "<name>_id" rather than the bare field name.
            if self.exist and not (u in changed_fields if not callable(val) else (str(u) + "_id") in changed_fields):
                # Existing record, field unchanged: exactly one match (this
                # record itself) is expected; more than one is a violation.
                if val and self.objects.filter(**{u: val}).count() > 1:
                    raise IntegrityError("Unique mismatch: %s for %s already exists for value: "
                                         "%s" % (u, self.__class__.__name__, val))
            else:
                # New record or changed field: any match at all is a violation.
                if val and self.objects.filter(**{u: val}).count():
                    raise IntegrityError("Unique mismatch: %s for %s already exists for value: "
                                         "%s" % (u, self.__class__.__name__, val))
    if self.Meta.unique_together:
        changed_fields = self.changed_fields(from_db=True)
        for uniques in self.Meta.unique_together:
            vals = dict([(u, _getattr(u)) for u in uniques])
            if self.exist:
                # Did ANY member of the combination change?
                query_is_changed = []
                for uni in vals.keys():
                    if callable(vals[uni]):
                        # Relation field: change is tracked under "<name>_id".
                        is_changed = (str(uni) + "_id") in changed_fields
                        query_is_changed.append(is_changed)
                    else:
                        is_changed = uni in changed_fields
                        query_is_changed.append(is_changed)
                is_unique_changed = any(query_is_changed)
                if not is_unique_changed:
                    # Combination unchanged: one match (ourselves) allowed.
                    if self.objects.filter(**vals).count() > 1:
                        raise IntegrityError("Unique together mismatch: %s combination already exists for %s" % (vals, self.__class__.__name__))
                else:
                    # Combination changed: any match is a violation.
                    if self.objects.filter(**vals).count():
                        raise IntegrityError("Unique together mismatch: %s combination already exists for %s" % (vals, self.__class__.__name__))
            else:
                # Brand-new record: any match is a violation.
                if self.objects.filter(**vals).count():
                    raise IntegrityError("Unique together mismatch: %s combination already exists for %s" % (vals, self.__class__.__name__))
def defaults(self):
    """Reset the chart options and style to defaults"""
    # Wipe any accumulated state, then re-apply the baseline look.
    self.chart_style = {}
    self.chart_opts = {}
    self.style("color", "#30A2DA")
    self.width(900)
    self.height(250)
def get_location(self):
    """Return the absolute location of this widget on the Screen, taking into
    account the current state of the Frame that is displaying it and any label
    offsets of the Widget.

    :returns: A tuple of the form (<X coordinate>, <Y coordinate>).
    """
    origin_x, origin_y = self._frame.canvas.origin
    abs_x = self._x + origin_x + self._offset
    abs_y = self._y + origin_y - self._frame.canvas.start_line
    return (abs_x, abs_y)
def increase_writes_in_units(current_provisioning, units, max_provisioned_writes,
                             consumed_write_units_percent, log_tag):
    """Increase the current provisioning with ``units`` units.

    :type current_provisioning: int
    :param current_provisioning: The current provisioning
    :type units: int
    :param units: How many units should we increase with
    :type max_provisioned_writes: int
    :param max_provisioned_writes: Configured max provisioned writes
    :type consumed_write_units_percent: float
    :param consumed_write_units_percent: Number of consumed write units
    :type log_tag: str
    :param log_tag: Prefix for the log
    :returns: int -- New provisioning value
    """
    units = int(units)
    current_provisioning = float(current_provisioning)
    consumed_write_units_percent = float(consumed_write_units_percent)
    # Base the increase on actual consumption when it exceeds what is
    # currently provisioned, otherwise on the provisioned value itself.
    consumption_based = int(math.ceil(current_provisioning * (consumed_write_units_percent / 100)))
    if consumption_based > current_provisioning:
        updated_provisioning = consumption_based + units
    else:
        updated_provisioning = int(current_provisioning) + units
    # Clamp to the configured ceiling (0 or negative means "no limit").
    if 0 < max_provisioned_writes < updated_provisioning:
        logger.info('{0} - Reached provisioned writes max limit: {1}'.format(log_tag, max_provisioned_writes))
        return max_provisioned_writes
    logger.debug('{0} - Write provisioning will be increased to {1:d} units'.format(log_tag, int(updated_provisioning)))
    return updated_provisioning
def get_identity(user):
    """Create an identity for a given user instance.

    Primarily useful for testing.

    :param user: object with an optional ``id`` and optional ``roles``
    :return: an ``Identity`` providing a UserNeed (when ``id`` exists) and a
        RoleNeed per role, with ``identity.user`` set to the given user
    """
    # BUG FIX: the original evaluated ``Identity(user.id)`` *before* checking
    # ``hasattr(user, 'id')``, so the guard could never prevent an
    # AttributeError for id-less users.
    identity = Identity(getattr(user, 'id', None))
    if hasattr(user, 'id'):
        identity.provides.add(UserNeed(user.id))
    for role in getattr(user, 'roles', []):
        identity.provides.add(RoleNeed(role.name))
    identity.user = user
    return identity
def lookup_class_name(name, context, depth=3):
    """given a table name in the form `schema_name`.`table_name`, find its class in the context.

    :param name: `schema_name`.`table_name`
    :param context: dictionary representing the namespace
    :param depth: search depth into imported modules, helps avoid infinite recursion.
    :return: class name found in the context or None if not found
    """
    # breadth-first search over the namespace, descending into imported
    # modules up to `depth` levels.
    nodes = [dict(context=context, context_name='', depth=depth)]
    while nodes:
        node = nodes.pop(0)
        for member_name, member in node['context'].items():
            if not member_name.startswith('_'):
                # skip IPython's implicit variables
                if inspect.isclass(member) and issubclass(member, Table):
                    if member.full_table_name == name:
                        # found it!
                        return '.'.join([node['context_name'], member_name]).lstrip('.')
                    try:
                        # look for part tables
                        parts = member._ordered_class_members
                    except AttributeError:
                        # not a UserTable -- cannot have part tables.
                        pass
                    else:
                        # Part classes are upper-cased attributes of the master.
                        for part in (getattr(member, p) for p in parts if p[0].isupper() and hasattr(member, p)):
                            if inspect.isclass(part) and issubclass(part, Table) and part.full_table_name == name:
                                return '.'.join([node['context_name'], member_name, part.__name__]).lstrip('.')
                elif node['depth'] > 0 and inspect.ismodule(member) and member.__name__ != 'datajoint':
                    # Queue the module's namespace for a shallower search.
                    try:
                        nodes.append(dict(context=dict(inspect.getmembers(member)),
                                          context_name=node['context_name'] + '.' + member_name,
                                          depth=node['depth'] - 1))
                    except ImportError:
                        # could not import, so do not attempt
                        pass
    return None
def _option(value):
    '''Look up the value for an option.'''
    # Precedence: minion opts, then pillar['master'], then pillar itself;
    # returns None implicitly when the option is found nowhere.
    for source in (__opts__, __pillar__.get('master', {}), __pillar__):
        if value in source:
            return source[value]
def _concat_nbest_translations(translations: List[Translation],
                               stop_ids: Set[int],
                               length_penalty: LengthPenalty,
                               brevity_penalty: Optional[BrevityPenalty] = None) -> Translation:
    """Combines nbest translations through concatenation.

    :param translations: A list of translations (sequence starting with BOS symbol,
        attention_matrix), score and length.
    :param stop_ids: The EOS symbols.
    :param length_penalty: LengthPenalty.
    :param brevity_penalty: Optional BrevityPenalty.
    :return: A concatenation of the translations with a score.
    """
    expanded = (_expand_nbest_translation(translation) for translation in translations)
    # zip(*...) regroups by nbest rank; each group is concatenated separately.
    concatenated = [
        _concat_translations(translations=list(group),
                             stop_ids=stop_ids,
                             length_penalty=length_penalty,
                             brevity_penalty=brevity_penalty)
        for group in zip(*expanded)
    ]  # type: List[Translation]
    return _reduce_nbest_translations(concatenated)
def __is_valid_type(self, typ, typlist):
    """Check if type is valid based on input type list.

    "string" is special because it can be used for stringlist.

    :param typ: the type to check
    :param typlist: the list of types to check against
    :return: True on success, False otherwise
    """
    if typ in typlist:
        return True
    return typ == "string" and "stringlist" in typlist
def update(self, other=None, **kwargs):
    """Set metadata values from the given iterable `other` and kwargs.

    Behavior is like `dict.update`: If `other` has a ``keys`` method,
    they are looped over and ``self[key]`` is assigned ``other[key]``.
    Else, ``other`` is an iterable of ``(key, value)`` iterables.

    Keys that don't match a metadata field or that have an empty value are
    dropped.
    """
    def _assign(key, value):
        # Silently drop unknown fields and empty values.
        if key in _ATTR2FIELD and value:
            self.set(self._convert_name(key), value)

    if other:
        if hasattr(other, 'keys'):
            for key in other.keys():
                _assign(key, other[key])
        else:
            for key, value in other:
                _assign(key, value)
    for key, value in kwargs.items():
        _assign(key, value)
def metric(self, name, count, elapsed):
    """A metric function that writes a single CSV file

    :arg str name: name of the metric
    :arg int count: number of items
    :arg float elapsed: time in seconds
    """
    if name is None:
        warnings.warn("Ignoring unnamed metric", stacklevel=3)
        return
    # Serialize writes: csv writers are not safe for concurrent use.
    with self.lock:
        self.writer.writerow((name, count, "%f" % elapsed))
def relation_(self, table, origin_field, search_field, destination_field=None, id_field="id"):
    """Returns a DataSwim instance with a column filled from a relation foreign key"""
    joined = self._relation(table, origin_field, search_field, destination_field, id_field)
    return self._duplicate_(joined)
def _should_use_fr_error_handler(self):
    """Determine if error should be handled with FR or default Flask

    The goal is to return Flask error handlers for non-FR-related routes,
    and FR errors (with the correct media type) for FR endpoints. This
    method currently handles 404 and 405 errors.

    :return: bool
    """
    adapter = current_app.create_url_adapter(request)
    try:
        adapter.match()
    except MethodNotAllowed as e:
        # Check if the other HTTP methods at this url would hit the Api
        valid_route_method = e.valid_methods[0]
        rule, _ = adapter.match(method=valid_route_method, return_rule=True)
        return self.owns_endpoint(rule.endpoint)
    except NotFound:
        return self.catch_all_404s
    except:
        # Werkzeug throws other kinds of exceptions, such as Redirect
        # (deliberate bare except: routing exceptions are not all
        # Exception subclasses).
        pass
    # Falls through (url matched, or non-404/405 routing exception):
    # implicitly returns None, which callers treat as falsy.
def explore_show_head(self, uri, check_headers=None):
    """Do HEAD on uri and show information.

    Will also check headers against any values specified in
    check_headers.
    """
    print("HEAD %s" % (uri))
    if (re.match(r'^\w+:', uri)):
        # Looks like a URI
        response = requests.head(uri)
    else:
        # Mock up response if we have a local file
        response = self.head_on_file(uri)
    print(" status: %s" % (response.status_code))
    # BUG FIX: requests exposes status_code as an int; the original compared
    # it with the string '200', so the header details were never printed.
    if (response.status_code == 200):
        # print some of the headers
        for header in ['content-length', 'last-modified', 'lastmod', 'content-type', 'etag']:
            if header in response.headers:
                check_str = ''
                if (check_headers is not None and header in check_headers):
                    if (response.headers[header] == check_headers[header]):
                        check_str = ' MATCHES EXPECTED VALUE'
                    else:
                        check_str = ' EXPECTED %s' % (check_headers[header])
                print(" %s: %s%s" % (header, response.headers[header], check_str))
def program_unit_name(line: str) -> str:
    """Given a line that starts a program unit, i.e., a program, module,
    subprogram, or function, this function returns the name associated
    with that program unit.

    :param line: source line beginning a program unit
    :return: the unit name (second capture group of ``RE_PGM_UNIT_START``)
    """
    match = RE_PGM_UNIT_START.match(line)
    # ``is not None`` is the idiomatic None test; the original ``!= None``
    # invokes __eq__ and can misbehave for types with custom equality.
    assert match is not None
    return match.group(2)
def num_pages(self):
    """Returns the total number of pages."""
    if not self.count and not self.allow_empty_first_page:
        return 0
    # Orphans are folded into the previous page; there is always >= 1 hit.
    effective = max(1, self.count - self.orphans)
    return int(ceil(effective / float(self.per_page)))
def draw_hist(self, metric, title=""):
    """Draw a series of histograms of the selected keys over different
    training steps.

    Renders up to the last 10 recorded steps as stacked translucent
    histogram outlines on a 3D axis (newer steps more opaque).
    """
    # TODO: assert isinstance(list(values.values())[0], np.ndarray)
    rows = 1  # NOTE(review): rows/cols are currently unused.
    cols = 1
    limit = 10
    # max steps to show
    # We need a 3D projection Subplot, so ignore the one provided to
    # as an create a new one.
    ax = self.figure.add_subplot(self.gs, projection="3d")
    ax.view_init(30, -80)
    # Compute histograms
    verts = []
    area_colors = []
    edge_colors = []
    for i, s in enumerate(metric.steps[-limit:]):
        # NOTE(review): the histogram is taken over metric.data[-i-1:] (a
        # growing tail of ALL recorded values) rather than the data recorded
        # at step ``s`` -- confirm this is the intended semantics.
        hist, edges = np.histogram(metric.data[-i - 1:])
        # X is bin centers
        x = np.diff(edges) / 2 + edges[:-1]
        # Y is hist values
        y = hist
        # Pad both ends so each polygon closes down to the baseline.
        x = np.concatenate([x[0:1], x, x[-1:]])
        y = np.concatenate([[0], y, [0]])
        # Ranges
        if i == 0:
            x_min = x.min()
            x_max = x.max()
            y_min = y.min()
            y_max = y.max()
        x_min = np.minimum(x_min, x.min())
        x_max = np.maximum(x_max, x.max())
        y_min = np.minimum(y_min, y.min())
        y_max = np.maximum(y_max, y.max())
        # Older steps fade out: alpha ramps linearly up to 0.8 for the newest.
        alpha = 0.8 * (i + 1) / min(limit, len(metric.steps))
        verts.append(list(zip(x, y)))
        area_colors.append(np.array(self.theme["hist_color"] + [alpha]))
        edge_colors.append(np.array(self.theme["hist_outline_color"] + [alpha]))
    poly = PolyCollection(verts, facecolors=area_colors, edgecolors=edge_colors)
    # Each step's polygon is placed at its own depth along the y (zs) axis.
    ax.add_collection3d(poly, zs=list(range(min(limit, len(metric.steps)))), zdir='y')
    ax.set_xlim(x_min, x_max)
    ax.set_ylim(0, limit)
    ax.set_yticklabels(metric.formatted_steps[-limit:])
    ax.set_zlim(y_min, y_max)
    ax.set_title(metric.name)
def get_digest(self):
    """return int uuid number for digest

    :rtype: int
    :return: digest
    """
    # The 16-byte digest is two big-endian 64-bit words; combine into one int.
    high, low = struct.unpack('>QQ', self.digest)
    return (high << 64) | low
def logout_oauth2(self):
    '''Logout for given Oauth2 bearer token'''
    url = "https://api.robinhood.com/oauth2/revoke_token/"
    data = {
        "client_id": CLIENT_ID,
        "token": self.refresh_token,
    }
    res = self.post(url, payload=data)
    if res is not None:
        # Any response body means the revocation failed.
        raise AuthenticationError("fast_arrow could not log out.")
    # Revocation succeeded: drop all cached credentials and session state.
    self.account_id = None
    self.account_url = None
    self.access_token = None
    self.refresh_token = None
    self.mfa_code = None
    self.scope = None
    self.authenticated = False
    return True
def compute_venn2_subsets(a, b):
    '''Given two set or Counter objects, computes the sizes of
    (a & ~b, b & ~a, a & b) and returns the result as a tuple.

    Both arguments must be of the same type, otherwise ValueError is raised.

    >>> compute_venn2_subsets(set([1, 2, 3, 4]), set([2, 3, 4, 5, 6]))
    (1, 2, 3)
    >>> compute_venn2_subsets(Counter([1, 1, 2, 2, 2]), Counter([1, 2, 3, 3]))
    (3, 2, 2)
    '''
    if type(a) is not type(b):
        raise ValueError("Both arguments must be of the same type")
    # len() does not give multiset cardinality for a Counter; sum the counts.
    if type(a) is Counter:
        set_size = lambda c: sum(c.values())
    else:
        set_size = len
    return set_size(a - b), set_size(b - a), set_size(a & b)
def EvalGeneric(self, hashers=None):
    """Causes the entire file to be hashed by the given hash functions.

    This sets up a 'finger' for fingerprinting, where the entire file is
    passed through a pre-defined (or user defined) set of hash functions.

    Args:
      hashers: An iterable of hash classes (e.g. out of hashlib) which will
               be instantiated for use. If hashers is not provided, or is
               provided as 'None', the default hashers will get used. To
               invoke this without hashers, provide an empty list.

    Returns:
      Always True, as all files are 'generic' files.
    """
    if hashers is None:
        hashers = Fingerprinter.GENERIC_HASH_CLASSES
    # One Range covering the whole file: every byte feeds every hasher.
    finger = Finger(
        [hasher_cls() for hasher_cls in hashers],
        [Range(0, self.filelength)],
        {'name': 'generic'},
    )
    self.fingers.append(finger)
    return True
def get_ngroups(self, field=None):
    '''Returns ngroups count if it was specified in the query, otherwise ValueError.

    If grouping on more than one field, provide the field argument to
    specify which count you are looking for.
    '''
    if not field:
        field = self._determine_group_field(field)
    grouped = self.data['grouped'][field]
    if 'ngroups' not in grouped:
        raise ValueError("ngroups not found in response. specify group.ngroups in the query.")
    return grouped['ngroups']
def _remove_api_url_from_link(link):
    '''Remove the API URL from the link if it is there.'''
    # Strip the primary URL first, then the mirror URL, exactly as two
    # independent checks (a link could theoretically match both in turn).
    for prefix in (_api_url(), _api_url(mirror=True)):
        if link.startswith(prefix):
            link = link[len(prefix):]
    return link
def cloneQuery(self, limit=_noItem, sort=_noItem):
    """Clone the original query which this distinct query wraps, and return
    a new wrapper of the same type around that clone."""
    cloned = self.query.cloneQuery(limit=limit, sort=sort)
    return type(self)(cloned)
def sample_group(sid, groups):
    """Iterate through all categories in an OrderedDict and return the
    category name if SampleID is present in that category.

    :type sid: str
    :param sid: SampleID from dataset.
    :type groups: OrderedDict
    :param groups: Returned dict from phylotoast.util.gather_categories() function.
    :return type: str
    :return: Category name used to classify ``sid`` (None if not found).
    """
    for category, group in groups.items():
        if sid in group.sids:
            return category
def update(self, other):
    """Update the collection with items from *other*. Accepts other
    :class:`SortedSetBase` instances, dictionaries mapping members to
    numeric scores, or sequences of ``(member, score)`` tuples.
    """
    def update_trans(pipe):
        # Read the source pairs inside the transaction so the read is
        # consistent with the writes; the pipe is only passed for
        # Redis-backed sources.
        other_items = method(pipe=pipe) if use_redis else method()
        pipe.multi()
        for member, score in other_items:
            pipe.zadd(self.key, {self._pickle(member): float(score)})
    watches = []
    if self._same_redis(other, RedisCollection):
        # *other* lives on the same Redis server: WATCH its key so a
        # concurrent modification retries the whole transaction.
        use_redis = True
        watches.append(other.key)
    else:
        use_redis = False
    # Mapping-likes yield (member, score) via items(); any other iterable
    # is assumed to already produce (member, score) pairs.
    if hasattr(other, 'items'):
        method = other.items
    elif hasattr(other, '__iter__'):
        method = other.__iter__
    self._transaction(update_trans, *watches)
def _record_app_data(self, data):
    """Parse raw metadata output for a single application.

    The usual output is::

        ApplicationName RevisionNumber
        ApplicationName ROOT_Version
        ApplicationName KM3NET
        ApplicationName ./command/line --arguments --which --can
        contain
        also
        multiple lines
        and --additional flags
        etc.
        ApplicationName Linux ... (just the `uname -a` output)

    :param data: list of raw metadata lines for one application
        (bytes — the joins below use byte strings; TODO confirm against caller)
    """
    # Line 0: "<name> <revision>"
    name, revision = data[0].split()
    # Line 1: "<name> <ROOT version>" — keep only the version token.
    root_version = data[1].split()[1]
    # Lines 3..: the (possibly multi-line) command invocation; everything
    # from the "<name> Linux ..." (uname) line onward is cut off.
    command = b'\n'.join(data[3:]).split(b'\n' + name + b' Linux')[0]
    self.meta.append({'application_name': np.string_(name), 'revision': np.string_(revision), 'root_version': np.string_(root_version), 'command': np.string_(command)})
def ints(self, qlist):
    """Converts a sequence of pegasus_index node labels into
    linear_index node labels, preserving order.

    Parameters
    ----------
    qlist : sequence of 4-tuples of ints
        The pegasus_index node labels (u, w, k, z).

    Returns
    -------
    rlist : iterable of ints
        The linear_index node labels corresponding to qlist.
    """
    m, m1 = self.args

    def _to_linear(coord):
        u, w, k, z = coord
        return ((m * u + w) * 12 + k) * m1 + z

    return map(_to_linear, qlist)
def read_namespaced_role_binding(self, name, namespace, **kwargs):  # noqa: E501
    """Read the specified RoleBinding.  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.read_namespaced_role_binding(name, namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the RoleBinding (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :return: V1RoleBinding
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    call = self.read_namespaced_role_binding_with_http_info
    if kwargs.get('async_req'):
        # Asynchronous: hand the request thread straight back.
        return call(name, namespace, **kwargs)  # noqa: E501
    return call(name, namespace, **kwargs)  # noqa: E501
def update_item(self, table_name, key, attribute_updates, expected=None, return_values=None, object_hook=None):
    """Edits an existing item's attributes. You can perform a conditional
    update (insert a new attribute name-value pair if it doesn't exist,
    or replace an existing name-value pair if it has certain expected
    attribute values).

    :type table_name: str
    :param table_name: The name of the table.

    :type key: dict
    :param key: A Python version of the Key data structure defined by
        DynamoDB which identifies the item to be updated.

    :type attribute_updates: dict
    :param attribute_updates: A Python version of the AttributeUpdates
        data structure defined by DynamoDB.

    :type expected: dict
    :param expected: A Python version of the Expected data structure
        defined by DynamoDB.

    :type return_values: str
    :param return_values: Controls the return of attribute name-value pairs
        before they were changed. Possible values are: None or 'ALL_OLD'.
        If 'ALL_OLD' is specified and the item is overwritten, the content
        of the old item is returned.
    """
    payload = {
        'TableName': table_name,
        'Key': key,
        'AttributeUpdates': attribute_updates,
    }
    # Optional request members are only serialized when supplied.
    if expected:
        payload['Expected'] = expected
    if return_values:
        payload['ReturnValues'] = return_values
    return self.make_request('UpdateItem', json.dumps(payload), object_hook=object_hook)
def deserialize_frame(stream, header, verifier=None):
    """Deserializes a frame from a body.

    :param stream: Source data stream
    :type stream: io.BytesIO
    :param header: Deserialized header
    :type header: aws_encryption_sdk.structures.MessageHeader
    :param verifier: Signature verifier object (optional)
    :type verifier: aws_encryption_sdk.internal.crypto.Verifier
    :returns: Deserialized frame and a boolean stating if this is the final frame
    :rtype: :class:`aws_encryption_sdk.internal.structures.MessageFrameBody` and bool
    :raises SerializationError: if a final frame declares a content length
        not smaller than the regular frame length
    """
    _LOGGER.debug("Starting frame deserialization")
    frame_data = {}
    final_frame = False
    # First 4 bytes: either the frame sequence number, or the reserved
    # end-of-message marker announcing the final frame.
    (sequence_number,) = unpack_values(">I", stream, verifier)
    if sequence_number == SequenceIdentifier.SEQUENCE_NUMBER_END.value:
        _LOGGER.debug("Deserializing final frame")
        # For the final frame the real sequence number follows the marker.
        (sequence_number,) = unpack_values(">I", stream, verifier)
        final_frame = True
    else:
        _LOGGER.debug("Deserializing frame sequence number %d", int(sequence_number))
    frame_data["final_frame"] = final_frame
    frame_data["sequence_number"] = sequence_number
    # IV length is fixed by the algorithm suite recorded in the header.
    (frame_iv,) = unpack_values(">{iv_len}s".format(iv_len=header.algorithm.iv_len), stream, verifier)
    frame_data["iv"] = frame_iv
    if final_frame is True:
        # Only the final frame carries an explicit content length, which
        # must be strictly smaller than the regular frame length.
        (content_length,) = unpack_values(">I", stream, verifier)
        if content_length >= header.frame_length:
            raise SerializationError("Invalid final frame length: {final} >= {normal}".format(final=content_length, normal=header.frame_length))
    else:
        # Non-final frames always carry exactly frame_length bytes.
        content_length = header.frame_length
    # Ciphertext is immediately followed by the authentication tag.
    (frame_content, frame_tag) = unpack_values(">{content_len}s{auth_len}s".format(content_len=content_length, auth_len=header.algorithm.auth_len), stream, verifier, )
    frame_data["ciphertext"] = frame_content
    frame_data["tag"] = frame_tag
    return MessageFrameBody(**frame_data), final_frame
def zip(self, *items):
    """Zip the collection together with one or more arrays.

    :param items: The items to zip
    :type items: list

    :rtype: Collection
    """
    # Built-in zip is resolved globally here, not this method.
    pairs = zip(self.items, *items)
    return self.__class__(list(pairs))
def match(tgt, delimiter=DEFAULT_TARGET_DELIM, opts=None):
    '''Matches a grain based on regex.'''
    opts = opts or __opts__
    log.debug('grains pcre target: %s', tgt)
    # A pcre grain target must contain "<grain><delimiter><pattern>".
    if delimiter not in tgt:
        log.error('Got insufficient arguments for grains pcre match statement from master')
        return False
    return salt.utils.data.subdict_match(opts['grains'], tgt, delimiter=delimiter, regex_match=True)
def Validate(self, problems=default_problem_reporter):
    """Validate attribute values and this object's internal consistency.

    Returns:
      True iff all validation checks passed.
    """
    # Run every check unconditionally so *all* problems get reported to the
    # problem reporter, then aggregate the results.
    checks = [
        not util.ValidateRequiredFieldsAreNotEmpty(self, self._REQUIRED_FIELD_NAMES, problems),
        self.ValidateAgencyUrl(problems),
        self.ValidateAgencyLang(problems),
        self.ValidateAgencyTimezone(problems),
        self.ValidateAgencyFareUrl(problems),
        self.ValidateAgencyEmail(problems),
    ]
    return not any(checks)
def infer_struct(value: Mapping[str, GenericAny]) -> Struct:
    """Infer the :class:`~ibis.expr.datatypes.Struct` type of `value`."""
    if not value:
        raise TypeError('Empty struct type not supported')
    names = list(value.keys())
    field_types = [infer(v) for v in value.values()]
    return Struct(names, field_types)
def lgammln(xx):
    """Returns the natural log of the gamma function of xx.

    Gamma(z) = Integral(0, infinity) of t^(z-1) exp(-t) dt.
    (Adapted from: Numerical Recipes in C.)

    Usage:   lgammln(xx)
    """
    # Lanczos-style series coefficients from Numerical Recipes.
    coeff = [76.18009173, -86.50532033, 24.01409822,
             -1.231739516, 0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x + 0.5) * math.log(tmp)
    ser = 1.0
    for c in coeff:
        x = x + 1
        ser = ser + c / x
    return -tmp + math.log(2.50662827465 * ser)
def stop(self):
    """Stop the primitive manager."""
    # Iterate over a snapshot: stopping a primitive may mutate the list.
    for primitive in list(self.primitives):
        primitive.stop()
    StoppableLoopThread.stop(self)
def SecurityCheck(self, func, request, *args, **kwargs):
    """A decorator applied to protected web handlers.

    Test double: stamps the request with a canned user and ACL token
    instead of performing a real security check, then invokes the
    wrapped handler.
    """
    request.user = self.username
    request.token = access_control.ACLToken(username="Testing", reason="Just a test")
    return func(request, *args, **kwargs)
def sponsor_menu(root_menu, menu="sponsors", label=_("Sponsors"), sponsors_item=_("Our sponsors"), packages_item=_("Sponsorship packages")):
    """Add sponsor menu links."""
    root_menu.add_menu(menu, label, items=[])
    sponsors = (Sponsor.objects.all()
                .order_by('packages', 'order', 'id')
                .prefetch_related('packages'))
    for sponsor in sponsors:
        # Append the package symbols to the entry name when present.
        symbols = sponsor.symbols()
        if symbols:
            entry = u"» %s %s" % (sponsor.name, symbols)
        else:
            entry = u"» %s" % (sponsor.name,)
        with menu_logger(logger, "sponsor %r" % (sponsor.name,)):
            root_menu.add_item(entry, sponsor.get_absolute_url(), menu=menu)
    if sponsors_item:
        with menu_logger(logger, "sponsors page link"):
            root_menu.add_item(sponsors_item, reverse("wafer_sponsors"), menu)
    if packages_item:
        with menu_logger(logger, "sponsorship package page link"):
            root_menu.add_item(packages_item, reverse("wafer_sponsorship_packages"), menu)
def image_get_by_alias(alias, remote_addr=None, cert=None, key=None, verify_cert=True, _raw=False):
    '''Get an image by an alias

    alias :
        The alias of the image to retrieve

    remote_addr :
        An URL to a remote Server, you also have to give cert and key if
        you provide remote_addr and its a TCP Address!
        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate, e.g. ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key, e.g. ~/.config/lxc/client.key

    verify_cert : True
        Wherever to verify the cert, this is by default True but in the
        most cases you want to set it off as LXD normaly uses self-signed
        certificates.

    _raw : False
        Return the raw pylxd object or a dict of it?

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.image_get_by_alias xenial/amd64
    '''
    client = pylxd_client_get(remote_addr, cert, key, verify_cert)
    try:
        image = client.images.get_by_alias(alias)
    except pylxd.exceptions.LXDAPIException:
        raise SaltInvocationError('Image with alias \'{0}\' not found'.format(alias))
    return image if _raw else _pylxd_model_to_dict(image)
def _set_value ( self , value ) :
"""Called by a Job object to tell the result is ready , and
provides the value of this result . The object will become
ready and successful . The collector ' s notify _ ready ( ) method
will be called , and the callback method too""" | assert not self . ready ( )
self . _data = value
self . _success = True
self . _event . set ( )
if self . _collector is not None :
self . _collector . notify_ready ( self )
if self . _callback is not None :
try :
self . _callback ( value )
except :
traceback . print_exc ( ) |
def convert(**kwargs):
    """EXAMPLE DOCSTRING for function (you would usually put the discription here)

    Parameters
    ----------
    user : colon delimited list of analysts (default: "")
    magfile : input magnetometer file (required)

    Returns
    -------
    type - Tuple : (True or False indicating if conversion was sucessful, meas_file name written)
    """
    # NOTE(review): this is a conversion-script template. Several names used
    # below (specimen, sample, site, location, noave, output_dir_path,
    # spec_file, samp_file, site_file, loc_file, meas_file) are placeholders
    # that a concrete converter must define — as written they would raise
    # NameError at runtime.
    # get parameters from kwargs.get(parameter_name, default_value)
    user = kwargs.get('user', '')
    magfile = kwargs.get('magfile')
    # do any extra formating you need to variables here
    # open magfile to start reading data
    try:
        infile = open(magfile, 'r')
    except Exception as ex:
        print(("bad file path: ", magfile))
        return False, "bad file path"
    # Depending on the dataset you may need to read in all data here put it in a list of dictionaries or something here. If you do just replace the "for line in infile.readlines():" bellow with "for d in data:" where data is the structure you put your data into
    # define the lists that hold each line of data for their respective tables
    SpecRecs, SampRecs, SiteRecs, LocRecs, MeasRecs = [], [], [], [], []
    # itterate over the contence of the file
    for line in infile.readlines():
        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
        # extract data from line and put it in variables
        # fill this line of the Specimen table using above variables
        # (each record is appended only once per unique name)
        if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
            SpecRec['analysts'] = user
            SpecRecs.append(SpecRec)
        # fill this line of the Sample table using above variables
        if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
            SampRec['analysts'] = user
            SampRecs.append(SampRec)
        # fill this line of the Site table using above variables
        if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
            SiteRec['analysts'] = user
            SiteRecs.append(SiteRec)
        # fill this line of the Location table using above variables
        if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
            LocRec['analysts'] = user
            LocRecs.append(LocRec)
        # Fill this line of Meas Table using data in line
        MeasRec['analysts'] = user
        MeasRecs.append(MeasRec)
    # close your file object so Python3 doesn't throw an annoying warning
    infile.close()
    # open a Contribution object
    con = cb.Contribution(output_dir_path, read_tables=[])
    # Create Magic Tables and add to a contribution
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
    # figures out method codes for measuremet data
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)
    # write to file
    con.write_table_to_file('specimens', custom_name=spec_file)
    con.write_table_to_file('samples', custom_name=samp_file)
    con.write_table_to_file('sites', custom_name=site_file)
    con.write_table_to_file('locations', custom_name=loc_file)
    meas_file = con.write_table_to_file('measurements', custom_name=meas_file)
    return True, meas_file
def sato(target, mol_weight='pore.molecular_weight', boiling_temperature='pore.boiling_point', temperature='pore.temperature', critical_temperature='pore.critical_temperature'):
    r"""Uses Sato et al. model to estimate thermal conductivity for pure
    liquids from first principles at conditions of interest.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    boiling_temperature : string
        Dictionary key containing the boiling temperature of the component (K)
    mol_weight : string
        Dictionary key containing the molecular weight of the component (kg/mol)
    temperature : string
        The dictionary key containing the temperature values (K)
    critical_temperature : string
        The dictionary key containing the critical temperature values (K)
    """
    temp = target[temperature]
    crit_temp = target[critical_temperature]
    mol_wt = target[mol_weight]
    # Reduced temperatures for the operating and boiling points.
    reduced_boiling = target[boiling_temperature] / crit_temp
    reduced_temp = temp / crit_temp
    numerator = 3 + 20 * (1 - reduced_temp) ** (2 / 3)
    denominator = 3 + 20 * (1 - reduced_boiling) ** (2 / 3)
    # Sato correlation: molecular weight enters in g/mol, hence the 1e3.
    return (1.11 / ((mol_wt * 1e3) ** 0.5)) * numerator / denominator
def enable_wrapper(cls, enable, new_class):
    """Wrap the enable method to call pre and post enable signals and update
    module status."""
    def _wrapped(self, *args, **kwargs):
        # Guard the module lifecycle: must be installed and not yet enabled.
        if not self.installed:
            raise AssertionError('Module %s cannot be enabled, you should install it first' % self.verbose_name)
        if self.enabled:
            raise AssertionError('Module %s is already enabled' % self.verbose_name)
        logger.info("Enabling %s module" % self.verbose_name)
        pre_enable.send(sender=self)
        result = enable(self, *args, **kwargs)
        # Register interfaces (if present)
        if isinstance(self, DropletInterface):
            self.register()
        post_enable.send(sender=self)
        # Persist the new lifecycle status.
        info = self._info
        info.status = ModuleInfo.ENABLED
        info.save()
        logger.info("Enabled %s module" % self.verbose_name)
        return result
    return _wrapped
def replace_table_rate_shipping_by_id(cls, table_rate_shipping_id, table_rate_shipping, **kwargs):
    """Replace TableRateShipping.

    Replace all attributes of TableRateShipping. This method makes a
    synchronous HTTP request by default. To make an asynchronous HTTP
    request, please pass async=True

    >>> thread = api.replace_table_rate_shipping_by_id(table_rate_shipping_id, table_rate_shipping, async=True)
    >>> result = thread.get()

    :param async bool
    :param str table_rate_shipping_id: ID of tableRateShipping to replace (required)
    :param TableRateShipping table_rate_shipping: Attributes of tableRateShipping to replace (required)
    :return: TableRateShipping
             If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    call = cls._replace_table_rate_shipping_by_id_with_http_info
    if kwargs.get('async'):
        # Asynchronous: hand the request thread straight back.
        return call(table_rate_shipping_id, table_rate_shipping, **kwargs)
    return call(table_rate_shipping_id, table_rate_shipping, **kwargs)
def validate(self, value):
    """Check ``value`` against every registered validator.

    All validators are run; their :class:`ValidationError`\\ s are collected
    and, if any check failed, a single :class:`ValidationError` aggregating
    them is raised.

    :param value: the value to validate
    :return: the given value (that may have been converted)
    :raises ValidationError: if one or more validators reject the value
    """
    # BUG FIX: ``errors`` used to be re-initialized inside the loop and the
    # aggregate error was raised inside the loop, so only the first failing
    # validator was ever reported. Accumulate across all validators instead.
    errors = []
    for validator in self.validators:
        try:
            validator(value)
        except ValidationError as err:
            errors.append(err)
    if errors:
        raise ValidationError(errors)
    return value
def createProduct(self, powerups):
    """Create a new L{Product} instance which confers the given powerups.

    @type powerups: C{list} of powerup item types
    @rtype: L{Product}
    @return: The new product instance.
    """
    type_names = [qual(powerup).decode('ascii') for powerup in powerups]
    # Refuse overlapping products: each powerup type may belong to at most
    # one Product in the site store.
    for existing in self.store.parent.query(Product):
        for type_name in type_names:
            if type_name in existing.types:
                raise ValueError("%s is already included in a Product" % (type_name,))
    return Product(store=self.store.parent, types=type_names)
def tRNAscan(args):
    """%prog tRNAscan all.trna > all.trna.gff3

    Convert tRNAscan-SE output into gff3 format.

    Sequence                tRNA    Bounds  tRNA    Anti    Intron Bounds   Cove
    Name            tRNA #  Begin   End     Type    Codon   Begin   End     Score
    23231           1       335355  335440  Tyr     GTA     335392  335404  69.21
    23231           2       1076190 1076270 Leu     AAG     0       0       66.33

    Conversion based on PERL one-liner in:
    <https://github.com/sujaikumar/assemblage/blob/master/README-annotation.md>
    """
    from jcvi.formats.gff import sort
    p = OptionParser(tRNAscan.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    trnaout, = args
    gffout = trnaout + ".gff3"
    fp = open(trnaout)
    fw = open(gffout, "w")
    # Skip the two header lines, then consume the dashed separator line.
    next(fp)
    next(fp)
    row = next(fp)
    assert row.startswith("--------")
    for row in fp:
        atoms = [x.strip() for x in row.split("\t")]
        contig, trnanum, start, end, aa, codon, intron_start, intron_end, score = atoms
        start, end = int(start), int(end)
        orientation = '+'
        # tRNAscan-SE reports minus-strand hits with start > end;
        # GFF3 wants start <= end plus an explicit strand.
        if start > end:
            start, end = end, start
            orientation = '-'
        source = "tRNAscan"
        type = "tRNA"  # NOTE: shadows the builtin `type` within this loop
        # Unknown anticodons ("???") are normalized to "XXX".
        if codon == "???":
            codon = "XXX"
        comment = "ID={0}.tRNA.{1};Name=tRNA-{2} (anticodon: {3})".format(contig, trnanum, aa, codon)
        print("\t".join(str(x) for x in (contig, source, type, start, end, score, orientation, ".", comment)), file=fw)
    fw.close()
    # Sort the generated gff3 in place.
    sort([gffout, "-i"])
def _import_attr_n_module ( module_name , attr ) :
"""From the given ` ` module _ name ` ` import
the value for ` ` attr ` ` ( attribute ) .""" | __import__ ( module_name )
module = sys . modules [ module_name ]
attr = getattr ( module , attr )
return attr , module |
def size(array):
    """Return a human-readable description of the number of bytes required
    to store the data of the given array.

    For example::

        >>> array.nbytes
        1400000
        >>> biggus.size(array)
        '1.34 MiB'

    Parameters
    ----------
    array : array-like object
       The array object must provide an `nbytes` property.

    Returns
    -------
    out : str
        Human-readable size string (B, KiB, MiB, GiB or TiB).
    """
    nbytes = array.nbytes
    # Binary-prefixed units; each threshold is 1024x the previous one.
    thresholds = [(1 << 10, 'B'), (1 << 20, 'KiB'), (1 << 30, 'MiB'), (1 << 40, 'GiB')]
    for limit, unit in thresholds:
        if nbytes < limit:
            if unit == 'B':
                return '{} B'.format(nbytes)
            return '{:.02f} {}'.format(nbytes / (limit >> 10), unit)
    return '{:.02f} TiB'.format(nbytes / (1 << 40))
def connect(self):
    """Create internal connection to AMQP service."""
    logging.info("Connecting to {} with user {}.".format(self.host, self.username))
    creds = pika.PlainCredentials(self.username, self.password)
    params = pika.ConnectionParameters(
        host=self.host,
        credentials=creds,
        heartbeat_interval=self.heartbeat_interval,
    )
    # Blocking (synchronous) connection; cached on the instance.
    self.connection = pika.BlockingConnection(params)
def refresh(*modules: typing.Union[str, types.ModuleType], recursive: bool = False, force: bool = False) -> bool:
    """Checks the specified module or modules for changes and reloads them if
    they have been changed since the module was first imported or last
    refreshed.

    :param modules:
        One or more module objects that should be refreshed if the currently
        loaded versions are out of date. The package name for modules can
        also be used.
    :param recursive:
        When true, any imported sub-modules of this module will also be
        refreshed if they have been updated.
    :param force:
        When true, all modules will be refreshed even if it doesn't appear
        that they have been updated.
    :return:
        True or False depending on whether any modules were refreshed by
        this call.
    """
    # Reload every module first (no short-circuiting), then aggregate.
    results = [reload_module(module, recursive, force) for module in modules]
    return any(results)
def bins(self):
    """Bins in the wider format (as edge pairs).

    Returns
    -------
    bins : np.ndarray
        shape=(bin_count, 2)
    """
    # Lazily built from the edge representation and cached on the instance.
    if self._bins is None:
        self._bins = make_bin_array(self.numpy_bins)
    return self._bins
def validate_response(self, resp):
    """Validates resp against expected return type for this function.

    Raises RpcException if the response is invalid.
    """
    ok, msg = self.contract.validate(self.returns, self.returns.is_array, resp)
    if ok:
        return
    detail = "Function '%s' invalid response: '%s'. %s" % (self.full_name, str(resp), msg)
    raise RpcException(ERR_INVALID_RESP, detail)
def gradient(self, mu, dist):
    """Derivative of the link function wrt mu.

    Parameters
    ----------
    mu : array-like of length n
    dist : Distribution instance

    Returns
    -------
    grad : np.array of length n
    """
    denominator = mu * (dist.levels - mu)
    return dist.levels / denominator
def in6_cidr2mask(m):
    """Return the mask (bitstring) associated with provided length value.

    For instance if function is called on 48, return value is
    b'\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00'.
    """
    if m > 128 or m < 0:
        raise Kamene_Exception("value provided to in6_cidr2mask outside [0, 128] domain (%d)" % m)
    # Build the mask 32 bits at a time; once the prefix is exhausted the
    # max(0, ...) clamp yields all-zero words.
    words = []
    remaining = m
    for _ in range(4):
        words.append(max(0, 2 ** 32 - 2 ** (32 - min(32, remaining))))
        remaining -= 32
    return b"".join(struct.pack('!I', word) for word in words)
def _designspace_locations(self, designspace):
    """Map font filenames to their locations in a designspace.

    Returns a two-element list: one path->location dict for sources,
    one for instances.
    """
    return [
        {_normpath(element.path): element.location for element in elements}
        for elements in (designspace.sources, designspace.instances)
    ]
def convert_value(self, value):
    """Convert value to float. If value is a string, ensure that the first
    character is the same as symbol ie. the value is in the currency this
    formatter is representing."""
    if not isinstance(value, str):
        return super().convert_value(value)
    assert value.startswith(self.symbol), "Currency does not start with " + self.symbol
    # Strip the currency symbol before delegating the numeric conversion.
    return super().convert_value(value.lstrip(self.symbol))
def set_viewlimits(self, axes=None):
    """Update the x/y limits of a plot from the current zoom stack.

    :param axes: matplotlib axes to read zoom limits for; defaults to
        ``self.axes``.
    """
    if axes is None:
        axes = self.axes
    xmin, xmax, ymin, ymax = self.data_range
    if len(self.conf.zoom_lims) > 1:
        zlims = self.conf.zoom_lims[-1]
        if axes in zlims:
            xmin, xmax, ymin, ymax = zlims[axes]
    # Clamp the window to the data range; if it does not intersect the data
    # at all, discard the zoom level and bail out.
    xmin = max(self.data_range[0], xmin)
    xmax = min(self.data_range[1], xmax)
    ymin = max(self.data_range[2], ymin)
    ymax = min(self.data_range[3], ymax)
    if (xmax < self.data_range[0] or xmin > self.data_range[1] or
            ymax < self.data_range[2] or ymin > self.data_range[3]):
        self.conf.zoom_lims.pop()
        return
    # Enforce a minimum span of 2 units on each axis, roughly centered.
    if abs(xmax - xmin) < 2:
        xmin = int(0.5 * (xmax + xmin) - 1)
        xmax = xmin + 2
    if abs(ymax - ymin) < 2:
        # BUG FIX: was 0.5 * (ymax + xmin), mixing x into the y center.
        ymin = int(0.5 * (ymax + ymin) - 1)
        ymax = ymin + 2
    # NOTE(review): limits are applied to self.axes, not the `axes`
    # argument, mirroring the original — confirm this is intended.
    self.axes.set_xlim((xmin, xmax), emit=True)
    self.axes.set_ylim((ymin, ymax), emit=True)
    self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
    self.conf.datalimits = [xmin, xmax, ymin, ymax]
    self.redraw()
def load_to_array(self, keys):
    """This loads the data contained in the catalogue into a numpy array.
    The method works only for float data.

    :param keys:
        A list of keys to be uploaded into the array
    :type list:
    :returns: np.ndarray of shape (n_events, len(keys))
    """
    n_rows = len(self.data[keys[0]])
    out = np.empty((n_rows, len(keys)))
    # Fill column by column; each key holds one attribute sequence.
    for col, key in enumerate(keys):
        out[:, col] = self.data[key]
    return out
def iterator(self, envelope):
    """:meth:`WMessengerOnionSessionFlowProto.iterator` implementation"""
    # First pair whose comparator strictly matches (is True) wins.
    for pair in self.__pairs:
        if pair.comparator().match(envelope) is True:
            return pair.flow().iterator(envelope)
    # No comparator matched: fall back to the default flow, if configured;
    # otherwise return None implicitly.
    if self.__default_flow is not None:
        return self.__default_flow.iterator(envelope)
def _build_gecos ( gecos_dict ) :
'''Accepts a dictionary entry containing GECOS field names and their values ,
and returns a full GECOS comment string , to be used with usermod .''' | return '{0},{1},{2},{3}' . format ( gecos_dict . get ( 'fullname' , '' ) , gecos_dict . get ( 'roomnumber' , '' ) , gecos_dict . get ( 'workphone' , '' ) , gecos_dict . get ( 'homephone' , '' ) ) |
def format_out_of_country_calling_number(numobj, region_calling_from):
    """Formats a phone number for out-of-country dialing purposes.

    If no region_calling_from is supplied, we format the number in its
    INTERNATIONAL format. If the country calling code is the same as that of
    the region where the number is from, then NATIONAL formatting will be
    applied.

    If the number itself has a country calling code of zero or an otherwise
    invalid country calling code, then we return the number with no formatting
    applied.

    Note this function takes care of the case for calling inside of NANPA and
    between Russia and Kazakhstan (who share the same country calling
    code). In those cases, no international prefix is used. For regions which
    have multiple international prefixes, the number in its INTERNATIONAL
    format will be returned instead.

    Arguments:
    numobj -- The phone number to be formatted
    region_calling_from -- The region where the call is being placed

    Returns the formatted phone number
    """
    if not _is_valid_region_code(region_calling_from):
        # Unknown/invalid origin region: fall back to plain INTERNATIONAL.
        return format_number(numobj, PhoneNumberFormat.INTERNATIONAL)
    country_code = numobj.country_code
    nsn = national_significant_number(numobj)
    if not _has_valid_country_calling_code(country_code):
        # Invalid country calling code: return the raw significant number.
        return nsn
    if country_code == _NANPA_COUNTRY_CODE:
        if is_nanpa_country(region_calling_from):
            # For NANPA regions, return the national format for these regions
            # but prefix it with the country calling code.
            return (unicod(country_code) + U_SPACE + format_number(numobj, PhoneNumberFormat.NATIONAL))
    elif country_code == country_code_for_valid_region(region_calling_from):
        # If regions share a country calling code, the country calling code
        # need not be dialled. This also applies when dialling within a
        # region, so this if clause covers both these cases. Technically this
        # is the case for dialling from La Reunion to other overseas
        # departments of France (French Guiana, Martinique, Guadeloupe), but
        # not vice versa - so we don't cover this edge case for now and for
        # those cases return the version including country calling code.
        # Details here:
        # http://www.petitfute.com/voyage/225-info-pratiques-reunion
        return format_number(numobj, PhoneNumberFormat.NATIONAL)
    # Metadata cannot be None because we checked '_is_valid_region_code()' above.
    metadata_for_region_calling_from = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_calling_from.upper())
    international_prefix = metadata_for_region_calling_from.international_prefix
    # For regions that have multiple international prefixes, the international
    # format of the number is returned, unless there is a preferred
    # international prefix.
    i18n_prefix_for_formatting = U_EMPTY_STRING
    # A single, unambiguous international prefix can be used verbatim.
    i18n_match = fullmatch(_SINGLE_INTERNATIONAL_PREFIX, international_prefix)
    if i18n_match:
        i18n_prefix_for_formatting = international_prefix
    elif metadata_for_region_calling_from.preferred_international_prefix is not None:
        i18n_prefix_for_formatting = metadata_for_region_calling_from.preferred_international_prefix
    region_code = region_code_for_country_code(country_code)
    # Metadata cannot be None because the country calling code is valid.
    metadata_for_region = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code.upper())
    formatted_national_number = _format_nsn(nsn, metadata_for_region, PhoneNumberFormat.INTERNATIONAL)
    formatted_number = _maybe_append_formatted_extension(numobj, metadata_for_region, PhoneNumberFormat.INTERNATIONAL, formatted_national_number)
    if len(i18n_prefix_for_formatting) > 0:
        # Explicit dialing prefix: "<prefix> <country code> <number>".
        formatted_number = (i18n_prefix_for_formatting + U_SPACE + unicod(country_code) + U_SPACE + formatted_number)
    else:
        # No usable prefix: fall back to the generic "+<cc> ..." form.
        formatted_number = _prefix_number_with_country_calling_code(country_code, PhoneNumberFormat.INTERNATIONAL, formatted_number)
    return formatted_number
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
    """Update request cookies header.

    Merges *cookies* (a mapping or an iterable of name/value pairs) into
    any cookies already present in the request's ``Cookie`` header.
    """
    if not cookies:
        return
    jar = SimpleCookie()
    # Fold existing request cookies into the jar, then drop the header so
    # it can be rewritten in one piece below.
    if hdrs.COOKIE in self.headers:
        jar.load(self.headers.get(hdrs.COOKIE, ''))
        del self.headers[hdrs.COOKIE]
    pairs = cookies.items() if isinstance(cookies, Mapping) else cookies
    for key, val in pairs:
        if isinstance(val, Morsel):
            # Rebuild the morsel explicitly so the coded_value is preserved.
            rebuilt = val.get(val.key, Morsel())
            rebuilt.set(val.key, val.value, val.coded_value)
            jar[key] = rebuilt
        else:
            jar[key] = val
    self.headers[hdrs.COOKIE] = jar.output(header='', sep=';').strip()
def expandvars(s, vars=None):
    """Perform variable substitution on the given string.

    Supported syntax:

    * ``$VARIABLE``
    * ``${VARIABLE}``
    * ``${#VARIABLE}``
    * ``${VARIABLE:-default}``

    :param s: message to expand
    :type s: str
    :param vars: dictionary of variables. Default is ``os.environ``
    :type vars: dict
    :return: expanded string
    :rtype: str
    """
    # A falsy mapping (None or empty) falls back to the process environment,
    # preserving the original ``vars or os.environ`` behaviour.
    mapping = vars or os.environ
    template = TemplateWithDefaults(s)
    return template.substitute(mapping)
def __parse_organizations(self, stream):
    """Parse GrimoireLab organizations.

    The GrimoireLab organizations format is a YAML element stored
    under the "organizations" key. The next example shows the
    structure of the document:

    - organizations:
        Bitergia:
          - bitergia.com
          - support.bitergia.com
          - biterg.io
        LibreSoft:
          - libresoft.es

    Parsed organizations (with their optional domains) are stored in
    ``self._organizations`` keyed by organization name.

    :param stream: YAML content to parse
    :raises InvalidFormatError: raised when the format of the YAML is
        not valid.
    """
    # Nothing to do for an empty stream.
    if not stream:
        return
    yaml_file = self.__load_yml(stream)
    try:
        for element in yaml_file:
            # NOTE(review): each element is read via the singular
            # 'organization' key, unlike the plural key shown in the
            # docstring example — presumably __load_yml reshapes the
            # document; confirm against its implementation.
            name = self.__encode(element['organization'])
            if not name:
                error = "Empty organization name"
                msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
                raise InvalidFormatError(cause=msg)
            o = Organization(name=name)
            # Domains are optional; when present they must be a list of
            # non-empty domain names.
            if 'domains' in element:
                if not isinstance(element['domains'], list):
                    error = "List of elements expected for organization %s" % name
                    msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
                    raise InvalidFormatError(cause=msg)
                for dom in element['domains']:
                    if dom:
                        d = Domain(domain=dom, is_top_domain=False)
                        o.domains.append(d)
                    else:
                        error = "Empty domain name for organization %s" % name
                        msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
                        raise InvalidFormatError(cause=msg)
            self._organizations[name] = o
    except KeyError as e:
        # A required attribute (e.g. 'organization') was missing.
        error = "Attribute %s not found" % e.args
        msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
        raise InvalidFormatError(cause=msg)
    except TypeError as e:
        # The YAML document did not have the expected structure.
        error = "%s" % e.args
        msg = self.GRIMOIRELAB_INVALID_FORMAT % {'error': error}
        raise InvalidFormatError(cause=msg)
def constraint(self, n=-1, fid=0):
    """Obtain the set of orthogonal equations that make the solution of
    the rank deficient normal equations possible.

    :param n: index of a single constraint equation; any value outside
        ``[0, deficiency]`` (the default ``-1`` included) returns the
        full set
    :param fid: the id of the sub-fitter (numerical)
    """
    equations = self._getval("constr", fid)
    # Selecting an individual constraint is not supported yet; only the
    # full set can be returned.
    if 0 <= n <= self.deficiency(fid):
        raise RuntimeError("Not yet implemented")
    return equations
def fix_string_case(text):
    """Convert case-insensitive characters to lower case.

    Case-sensitive characters as defined in config.AVRO_CASESENSITIVES
    retain their case, but others are converted to their lowercase
    equivalents. The result is a string with phonetic-compatible case
    which the parser will understand without confusion.
    """
    return ''.join(ch if is_case_sensitive(ch) else ch.lower() for ch in text)
def fetchone(self):
    """Fetch a single row.

    A lock object is used to assure that a single record will be fetched
    and all housekeeping done properly in a multithreaded environment.
    As getting a block is currently synchronous, this also protects
    against multiple block requests (but does not protect against
    explicit calls to _fetchBlock()).

    Returns the next record, or None when no more data is available.
    """
    self._cursorLock.acquire()
    # try/finally guarantees the lock is released even if _fetchBlock()
    # (or anything else) raises — the original code would otherwise leave
    # the lock held forever, deadlocking every other consumer.
    try:
        # If there are available records in the current block, return one
        # and advance the counter.
        if self._currentBlock is not None and self._currentRecordNum < len(self._currentBlock):
            idx = self._currentRecordNum
            self._currentRecordNum += 1
            return self._currentBlock[idx]
        # If no standby block is waiting, fetch a block.
        if self._standbyBlock is None:
            # TODO - make sure exceptions due to problems in getting the
            # block of records from the server are handled properly
            self._fetchBlock()
        # If we still do not have a standby block (or it is empty),
        # no more data is available.
        if self._standbyBlock is None or len(self._standbyBlock) == 0:
            return None
        # Promote the standby block to current and return its first record.
        self._currentBlock = self._standbyBlock
        self._standbyBlock = None
        self._currentRecordNum = 1
        return self._currentBlock[0]
    finally:
        self._cursorLock.release()
def _ellipsoids_bootstrap_expand(args):
    """Internal method used to compute the expansion factor(s) for a
    collection of bounding ellipsoids using bootstrapping."""
    # Unpack the single argument tuple (pool.map-friendly calling style).
    points, pointvol, vol_dec, vol_check = args
    rstate = np.random
    npoints, ndim = points.shape
    # Bootstrap resample with replacement: indices that were drawn form the
    # training set; indices never drawn form the held-out set.
    draws = rstate.randint(npoints, size=npoints)
    idx_in = np.unique(draws)
    missing = np.ones(npoints, dtype='bool')
    missing[idx_in] = False
    idx_out = np.where(missing)[0]
    # Edge case: make sure at least two points are held out.
    if len(idx_out) < 2:
        idx_out = np.append(idx_out, [0, 1])
    points_in = points[idx_in]
    points_out = points[idx_out]
    # Fit bounding ellipsoids to the resampled points.
    ell = bounding_ellipsoid(points_in, pointvol=pointvol)
    ells = _bounding_ellipsoids(points_in, ell, pointvol=pointvol,
                                vol_dec=vol_dec, vol_check=vol_check)
    # Normalized distance from each held-out point to its nearest ellipsoid;
    # the expansion factor is the largest such distance, floored at 1.
    dists = [min(el.distance(p) for el in ells) for p in points_out]
    return max(1., max(dists))
def clean_time(time_string):
    """Return a datetime from the Amazon-provided datetime string."""
    # Parse into a timezone-aware datetime object.
    parsed = dateutil.parser.parse(time_string)
    if settings.USE_TZ:
        return parsed
    # Timezone support is not active: convert to UTC and strip the tzinfo
    # so a naive datetime is returned.
    return parsed.astimezone(timezone.utc).replace(tzinfo=None)
def count(self, *args, **kw):
    """Counts whether the memoized value has already been
    set (a hit) or not (a miss)."""
    target = args[0]
    # A hit means the memo cache already holds a value for this method.
    if self.method_name in target._memo:
        self.hit += 1
    else:
        self.miss += 1
def get_user(self, id):
    """Returns details about the user for the given id.

    Use get_user_by_email() or get_user_by_username() for help
    identifying the id.
    """
    # Reading user records requires the SCIM read scope.
    self.assert_has_permission('scim.read')
    endpoint = self.uri + '/Users/%s' % (id)
    return self._get(endpoint)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.