signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def write(self, message):
    """Write *message* to every handler attached to the wrapped logger.

    :param message: Message text.
    :type message: unicode
    :return: Method success.
    :rtype: bool
    """

    # ``handlers`` is a plain attribute on logging.Logger instances;
    # access it directly instead of poking into ``__dict__``.
    for handler in self.__logger.handlers:
        handler.stream.write(message)
    return True
|
def get_character(self, position, offset=0):
    """Return the single character at *position* shifted by *offset*.

    An empty string is returned when the computed position lies at or
    beyond the end of the document.
    """

    target = self.get_position(position) + offset
    cursor = self.textCursor()
    cursor.movePosition(QTextCursor.End)
    if target >= cursor.position():
        return ''
    cursor.setPosition(target)
    cursor.movePosition(QTextCursor.Right, QTextCursor.KeepAnchor)
    return to_text_string(cursor.selectedText())
|
def split_run(self):
    """Split the assembly source into its logical elements.

    Reads the whole input stream line by line and returns a list of
    entries for commands and directives; jump marks (references) are
    recorded via ``add_ref`` instead of being appended.

    :return: list of run entries; commands are
        ``(line_count, "command", command_obj, args)`` tuples.
    :raises AssembleError: on an unknown mnemonic or a wrong number of
        arguments (when the missing ones cannot be defaulted).
    """

    sp_run = []
    for line in self.open_stream.read().split("\n"):
        self.line_count += 1
        # Skip comment-only and whitespace-only lines.
        if (self.iscomment(line)):
            continue
        if (line.isspace()):
            continue
        line = self.stripcomments(line)
        words = line.split()
        if (len(words) == 0):
            continue
        # Jump marks are registered, not emitted into the run.
        if (isreference(words)):
            self.add_ref(words)
            continue
        if (self.isdirective(words)):
            sp_run.append(self.handle_directive(words))
            continue
        if (not words[0] in self.commands):
            raise AssembleError("[Line {}]: Unknown Mnemonic '{}'".format(self.line_count, words[0]))
        mnemo = words[0]
        args = words[1:]
        if (len(args) != self.commands[mnemo].numargs()):  # check for default arguments
            args_ = list(args)
            # Fill missing trailing arguments with their declared
            # defaults; bail out when one has no default.
            for argtype in self.commands[mnemo].argtypes()[len(args):]:
                if (not argtype.can_default):
                    raise AssembleError("[Line {}]: Mnemonic '{}' expects {} arguments, but got {}".format(self.line_count, mnemo, self.commands[mnemo].numargs(), len(args)))
                else:
                    args_.append(argtype.default)
            args = args_
        # One word for the mnemonic itself plus one per argument.
        self.word_count += 1 + len(args)
        logging.debug("split run: " + str((self.line_count, "command", self.commands[mnemo], (args))))
        sp_run.append((self.line_count, "command", self.commands[mnemo], (args)))
    return sp_run
|
def execute(self, command, is_displayed=True, profile=None):
    """Execute a command on the remote server.

    :param command: Iterable of command arguments to execute remotely.
    :param is_displayed: True to stream output to stdout/stderr; False to
        collect and return it (default: True).
    :param profile: Profile to source before running (unix-like systems
        only should set this; default: None).
    :return: A tuple ``(output, exit_status)`` where ``output`` is the
        collected text (None when it was displayed) and ``exit_status``
        is the remote exit code.
    """

    # Quote each argument for remote shell execution.
    command = " ".join("'{0}'".format(argument) for argument in command)
    # BUGFIX: the original used ``not profile is "None"`` -- identity
    # comparison with a string literal is unreliable (interning detail);
    # use an equality test instead.
    if profile is not None and profile != "None":
        command = "source " + profile + ";" + command
    stdin, stdout, stderr = self.ssh.exec_command(command)
    # No input is sent to the remote process; close the write side early.
    stdin.channel.shutdown_write()
    stdin.close()
    # Print or gather output as it occurs.
    output = None
    if not is_displayed:
        output = []
        output.append(stdout.channel.recv(len(stdout.channel.in_buffer)).decode("utf-8"))
        output.append(stderr.channel.recv(len(stderr.channel.in_buffer)).decode("utf-8"))
    channel = stdout.channel
    # Keep reading until the channel is closed and fully drained.
    while not channel.closed or channel.recv_ready() or channel.recv_stderr_ready():
        is_data_present = False
        handles = select.select([channel], [], [])
        for read in handles[0]:  # read stdout and/or stderr if data is present
            buffer = None
            if read.recv_ready():
                buffer = channel.recv(len(read.in_buffer)).decode("utf-8")
                if is_displayed:
                    sys.stdout.write(buffer)
            if read.recv_stderr_ready():
                buffer = stderr.channel.recv_stderr(len(read.in_stderr_buffer)).decode("utf-8")
                if is_displayed:
                    sys.stderr.write(buffer)
            # Determine if the output should be updated and collected.
            if buffer is not None:
                is_data_present = True
                if not is_displayed:
                    output.append(buffer)
        # Exit once no data remains and the exit status is available.
        if (not is_data_present and channel.exit_status_ready() and not stderr.channel.recv_stderr_ready() and not channel.recv_ready()):
            # Stop reading and close the channel to stop processing.
            channel.shutdown_read()
            channel.close()
            break
    # Close file handles for stdout and stderr.
    stdout.close()
    stderr.close()
    # Join the collected output (if any).
    if output is not None:
        output = "".join(output)
    return output, channel.recv_exit_status()
|
def _file_nums_str(self, n_all, n_type, n_ign):
    """Construct a string showing the number of different file types.

    Returns
    -------
    f_str : str
    """

    # 'other' is the difference between all files and the named types.
    n_oth = n_all - np.sum(n_type)
    named = ["{} {}".format(name, num) for name, num in zip(self._COUNT_FILE_TYPES, n_type)]
    f_str = "{} Files".format(n_all) + " ("
    if len(n_type):
        f_str += ", ".join(named) + ", "
    f_str += "other {}; {} ignored)".format(n_oth, n_ign)
    return f_str
|
def install(self, connection, partition, table_name=None, columns=None, materialize=False, logger=None):
    """Create an FDW or materialized view for the given partition.

    Args:
        connection: connection to postgresql
        partition (orm.Partition): partition to expose
        table_name: unused here -- TODO confirm whether overrides use it
        columns: unused here -- TODO confirm whether overrides use it
        materialize (boolean): if True, create a read-only (materialized)
            table; if False, create a virtual table
        logger: logger used for debug output

    Returns:
        str: name of the created table (the partition vid).
    """

    partition.localize()
    self._add_partition(connection, partition)
    fdw_table = partition.vid
    view_table = '{}_v'.format(fdw_table)
    if materialize:
        with connection.cursor() as cursor:
            view_exists = self._relation_exists(connection, view_table)
            if view_exists:
                logger.debug('Materialized view of the partition already exists.\n partition: {}, view: {}'.format(partition.name, view_table))
            else:
                query = 'CREATE MATERIALIZED VIEW {} AS SELECT * FROM {};'.format(view_table, fdw_table)
                logger.debug('Creating new materialized view of the partition.' '\n partition: {}, view: {}, query: {}'.format(partition.name, view_table, query))
                cursor.execute(query)
                cursor.execute('COMMIT;')
    final_table = view_table if materialize else fdw_table
    with connection.cursor() as cursor:
        # NOTE(review): 'CREATE VIEW IF NOT EXISTS' is SQLite syntax, not
        # standard PostgreSQL -- confirm the target backend accepts it.
        view_q = "CREATE VIEW IF NOT EXISTS {} AS SELECT * FROM {} ".format(partition.vid, final_table)
        cursor.execute(view_q)
        cursor.execute('COMMIT;')
    return partition.vid
|
def is_tiff_format(self):
    """Check whether this file format is a TIFF image format.

    Example: ``MimeType.TIFF.is_tiff_format()`` or
    ``MimeType.is_tiff_format(MimeType.TIFF)``

    :param self: File format
    :type self: MimeType
    :return: ``True`` if the format is a TIFF variant, ``False`` otherwise
    :rtype: bool
    """

    tiff_variants = (MimeType.TIFF, MimeType.TIFF_d8, MimeType.TIFF_d16, MimeType.TIFF_d32f)
    return self in tiff_variants
|
async def start_serving(self, address=None, sockets=None, **kw):
    """Create the datagram server endpoint(s).

    Either *sockets* (pre-bound sockets) or *address* (a (host, port)
    tuple) must be supplied; raises RuntimeError otherwise, or when the
    server is already running.
    """

    if self._server:
        raise RuntimeError('Already serving')
    loop = self._loop
    server = DGServer(loop)
    if sockets:
        # One datagram endpoint per pre-created socket.
        for sock in sockets:
            transport, _ = await loop.create_datagram_endpoint(self.create_protocol, sock=sock)
            server.transports.append(transport)
    elif isinstance(address, tuple):
        transport, _ = await loop.create_datagram_endpoint(self.create_protocol, local_addr=address)
        server.transports.append(transport)
    else:
        raise RuntimeError('sockets or address must be supplied')
    self._set_server(server)
|
def graph_type(self, graph):
    """Return the stored type of *graph*."""

    packed = self.pack(graph)
    row = self.sql('graph_type', packed).fetchone()
    return row[0]
|
def tags(self):
    """Return a `set` of unique tags contained in `raw_content`."""

    content = self.raw_content
    if not content:
        return set()
    found = set()
    for word in content.split():
        # A tag is a '#'-prefixed word longer than the marker itself.
        if word.startswith("#") and len(word) > 1:
            found.add(word.strip("#").lower())
    return found
|
def debug(version=False):
    """Show the parsed manage file; -V shows the version instead."""

    if not version:
        print(json.dumps(MANAGE_DICT, indent=2))
        return
    print(__version__)
|
def add_info(self, data):
    """Add info entries to a build; returns self for chaining."""

    # Keys that are managed elsewhere and must never be overwritten.
    forbidden = ('status', 'state', 'name', 'id', 'application', 'services', 'release')
    for key, value in data.items():
        if key in forbidden:
            raise ValueError("Sorry, cannot set build info with key of {}".format(key))
        self.obj[key] = value
    self.changes.append("Adding build info")
    return self
|
def walk_packages(path=None, prefix='', onerror=None):
    """Yield (module_loader, name, ispkg) for all modules recursively
    on *path*, or, if *path* is None, all accessible modules.

    'path' should be either None or a list of paths to look for modules
    in.  'prefix' is a string to output on the front of every module name
    on output.

    Note that this function must import all *packages* (NOT all modules!)
    on the given path, in order to access the ``__path__`` attribute to
    find submodules.

    'onerror' is a function which gets called with one argument (the name
    of the package which was being imported) if any exception occurs
    while trying to import a package.  If no onerror function is
    supplied, ImportErrors are caught and ignored, while all other
    exceptions are propagated, terminating the search.

    Examples:
        # list all modules python can access
        walk_packages()
        # list all submodules of ctypes
        walk_packages(ctypes.__path__, ctypes.__name__ + '.')
    """

    def seen(p, m={}):
        # Deliberate mutable default: ``m`` persists across calls and
        # records every path item already visited.
        if p in m:
            return True
        m[p] = True
    for importer, name, ispkg in iter_modules(path, prefix):
        yield importer, name, ispkg
        if ispkg:
            try:
                # Importing the package is required to read its __path__.
                __import__(name)
            except ImportError:
                if onerror is not None:
                    onerror(name)
            except Exception:
                if onerror is not None:
                    onerror(name)
                else:
                    raise
            else:
                path = getattr(sys.modules[name], '__path__', None) or []
                # don't traverse path items we've seen before
                path = [p for p in path if not seen(p)]
                for item in walk_packages(path, name + '.', onerror):
                    yield item
|
def standard_cl_params(items):
    """Shared command line parameters for GATK programs.

    Handles no removal of duplicate reads for amplicon or non
    mark-duplicate experiments.  If we have pre-aligned inputs we ignore
    the value of mark duplicates (since they may already be marked in
    the input BAM).
    """

    def _skip_duplicates(data):
        # Amplicon runs, or configured-off duplicate marking with an
        # aligner present, keep duplicate reads.
        return (dd.get_coverage_interval(data) == "amplicon"
                or (dd.get_aligner(data) and not dd.get_mark_duplicates(data)))

    out = []
    if any(_skip_duplicates(d) for d in items):
        broad_runner = broad.runner_from_config(items[0]["config"])
        gatk_type = broad_runner.gatk_type()
        if gatk_type == "gatk4":
            out += ["--disable-read-filter", "NotDuplicateReadFilter"]
        elif LooseVersion(broad_runner.gatk_major_version()) >= LooseVersion("3.5"):
            out += ["-drf", "DuplicateRead"]
    return out
|
def close ( self ) :
"Stop the output stream , but further download will still perform"
|
if self . stream :
self . stream . close ( self . scheduler )
self . stream = None
|
def Parse(conditions):
    """Parse the file finder condition types into condition objects.

    Args:
        conditions: An iterator over `FileFinderCondition` objects.

    Yields:
        `ContentCondition` objects that correspond to the file-finder
        conditions; unrecognised condition types are silently skipped.
    """

    kind = rdf_file_finder.FileFinderCondition.Type
    classes = {
        kind.CONTENTS_LITERAL_MATCH: LiteralMatchCondition,
        kind.CONTENTS_REGEX_MATCH: RegexMatchCondition,
    }
    for condition in conditions:
        try:
            yield classes[condition.condition_type](condition)
        except KeyError:
            # Unsupported condition type -- skip it.
            pass
|
def acquire_lock(self, force=False):
    """Take out a lock (create a <dbname>.lock file) for the database.

    :param force: Whether to force taking "ownership" of the lock file.
    :type force: bool
    :raises: :class:`keepassdb.exc.DatabaseAlreadyLocked` - if the
        database is already locked (and force is not set to True).
    """

    if self.readonly:
        raise exc.ReadOnlyDatabase()
    if self._locked:
        return
    self.log.debug("Acquiring lock file: {0}".format(self.lockfile))
    if not force and os.path.exists(self.lockfile):
        raise exc.DatabaseAlreadyLocked('Lock file already exists: {0}'.format(self.lockfile))
    # Touch the lock file and remember that we hold it.
    open(self.lockfile, 'w').close()
    self._locked = True
|
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    logs
        /containers/<id>/logs
    power
        /containers/<id>/power

    ``super`` is called otherwise.
    """

    if which not in ('logs', 'power'):
        return super(AbstractDockerContainer, self).path(which)
    base = super(AbstractDockerContainer, self).path(which='self')
    return '{0}/{1}'.format(base, which)
|
def _yield_spatial_table(patch, div, spp_col, count_col, x_col, y_col):
    """Calculate an empirical spatial table.

    Returns
    -------
    DataFrame
        Spatial table for the given division.  See Notes.

    Notes
    -----
    The spatial table is the precursor to the SAR, EAR, and grid-based
    commonality metrics.  Each row in the table corresponds to a cell
    created by a given division.  Columns are cell_loc (within the grid
    defined by the division), spp_set, n_spp, and n_individs.
    """

    # Catch error if you don't use ; after divs in comm_grid in MacroecoDesktop
    try:
        div_split_list = div.replace(';', '').split(',')
    except AttributeError:
        # ``div`` was not a string (e.g. a tuple); normalise via its repr.
        div_split_list = str(div).strip("()").split(',')
    div_split = (x_col + ':' + div_split_list[0] + ';' + y_col + ':' + div_split_list[1])
    # Get cell_locs
    # Requires _parse_splits and _product functions to go y inside of x
    x_starts, x_ends = _col_starts_ends(patch, x_col, div_split_list[0])
    x_offset = (x_ends[0] - x_starts[0]) / 2
    x_locs = x_starts + x_offset
    y_starts, y_ends = _col_starts_ends(patch, y_col, div_split_list[1])
    y_offset = (y_ends[0] - y_starts[0]) / 2
    y_locs = y_starts + y_offset
    cell_locs = _product(x_locs, y_locs)
    # Get spp set and count for all cells
    n_spp_list = []
    # Number of species in cell
    n_individs_list = []
    spp_set_list = []
    # Set object giving unique species IDs in cell
    for cellstring, cellpatch in _yield_subpatches(patch, div_split, name='div'):
        spp_set = set(np.unique(cellpatch.table[spp_col]))
        spp_set_list.append(spp_set)
        n_spp_list.append(len(spp_set))
        n_individs_list.append(np.sum(cellpatch.table[count_col]))
    # Create and return dataframe
    df = pd.DataFrame({'cell_loc': cell_locs, 'spp_set': spp_set_list, 'n_spp': n_spp_list, 'n_individs': n_individs_list})
    # NOTE(review): despite the ``_yield`` name this function *returns*
    # one DataFrame rather than yielding -- confirm callers expect that.
    return df
|
def create_snapshot(self, name=None, description=None, force=False):
    """Create a snapshot of this volume, with an optional name and
    description.

    Normally snapshots will not happen if the volume is attached.  To
    override this default behavior, pass force=True.
    """

    # Passing non-None values lets the _create_body method distinguish
    # this from a request to create an instance.
    return self.manager.create_snapshot(
        volume=self,
        name=name or "",
        description=description or "",
        force=force,
    )
|
def apply_adaptation(self, target_illuminant, adaptation='bradford'):
    """Apply an adaptation matrix to change the XYZ color's illuminant.

    You'll most likely only need this during RGB conversions.

    :param target_illuminant: Name of the illuminant to adapt to.
    :param adaptation: Adaptation matrix name (default: 'bradford').
    """

    # BUGFIX: use raw strings -- ``"\-"`` is an invalid escape sequence
    # (DeprecationWarning today, SyntaxError in a future Python).  The
    # runtime string values are unchanged.
    logger.debug(r" \- Original illuminant: %s", self.illuminant)
    logger.debug(r" \- Target illuminant: %s", target_illuminant)
    # If the XYZ values were taken with a different reference white than
    # the native reference white of the target RGB space, a
    # transformation matrix must be applied.
    if self.illuminant != target_illuminant:
        logger.debug(r" \* Applying transformation from %s to %s ", self.illuminant, target_illuminant)
        # Sets the adjusted XYZ values, and the new illuminant, in place.
        apply_chromatic_adaptation_on_color(color=self, targ_illum=target_illuminant, adaptation=adaptation)
|
def to_dataframe(self):
    """Return the entire dataset as a single pandas DataFrame.

    Returns
    -------
    df : DataFrame with shape (n_instances, n_columns)
        A pandas DataFrame containing the complete original data table
        including all targets (specified by the meta data) and all
        features (including those that might have been filtered out).
    """

    if pd is None:
        raise DatasetsError("pandas is required to load DataFrame, it can be installed with pip")
    csv_path = find_dataset_path(self.name, ext=".csv.gz", data_home=self.data_home)
    return pd.read_csv(csv_path, compression="gzip")
|
def replicate_value(value, n=2, copy=True):
    """Replicate the input value `n` times.

    :param value:
        Value to be replicated.
    :type value: T
    :param n:
        Number of replications.
    :type n: int
    :param copy:
        If True the result contains deep-copies of the value.
    :type copy: bool
    :return:
        The value replicated `n` times.
    :rtype: list

    Example::

        >>> from functools import partial
        >>> fun = partial(replicate_value, n=5)
        >>> fun({'a': 3})
        ({'a': 3}, {'a': 3}, {'a': 3}, {'a': 3}, {'a': 3})
    """

    replicas = [value for _ in range(n)]
    return bypass(*replicas, copy=copy)
|
def recursive_input(input_label, type_class):
    '''Recursive user input prompter with type checker.

    Args
        input_label : str
            label shown in the prompt
        type_class : type
            name of python type (e.g. float, no parentheses)

    Returns
        output : `type_class`
            value entered by user converted to type `type_class`

    Note
        Use `ctrl-c` to exit input cycling.
    '''

    import sys
    type_str = str(type_class).split("'")[1]
    msg = 'Enter {} (type `{}`): '.format(input_label, type_str)
    # Catch `Ctrl-c` keyboard interrupts
    try:
        output = input(msg)
        print('')
        # BUGFIX: catch only conversion failures instead of a bare
        # ``except``, which also swallowed SystemExit/KeyboardInterrupt.
        try:
            return type_class(output)
        except (ValueError, TypeError):
            print('Input must be of type `{}`\n'.format(type_str))
            return recursive_input(input_label, type_class)
    # Keyboard interrupt passed, exit recursive input
    except KeyboardInterrupt:
        return sys.exit()
|
def get_file_contents(filename):
    """Read file contents from file `filename`; None when unreadable."""

    try:
        with open(filename) as handle:
            return handle.read()
    except IOError:
        # File not found (or unreadable): report no data.
        return None
|
def _load_char(self, char):
    """Build and store a glyph corresponding to an individual character.

    Parameters
    ----------
    char : str
        A single character to be represented.
    """

    assert isinstance(char, string_types) and len(char) == 1
    assert char not in self._glyphs
    # load new glyph data from font
    _load_glyph(self._font, char, self._glyphs)
    # put new glyph into the texture
    glyph = self._glyphs[char]
    bitmap = glyph['bitmap']
    # convert to an array zero-padded by ``_spread`` pixels on each side
    data = np.zeros((bitmap.shape[0] + 2 * self._spread, bitmap.shape[1] + 2 * self._spread), np.uint8)
    data[self._spread:-self._spread, self._spread:-self._spread] = bitmap
    # Store, while scaling down to proper size
    height = data.shape[0] // self.ratio
    width = data.shape[1] // self.ratio
    # Reserve an atlas slot with a one-pixel guard border on every side.
    region = self._atlas.get_free_region(width + 2, height + 2)
    if region is None:
        raise RuntimeError('Cannot store glyph')
    x, y, w, h = region
    # Shrink back inside the guard border.
    x, y, w, h = x + 1, y + 1, w - 2, h - 2
    self._renderer.render_to_texture(data, self._atlas, (x, y), (w, h))
    # Normalised texture coordinates of the glyph rectangle.
    u0 = x / float(self._atlas.shape[1])
    v0 = y / float(self._atlas.shape[0])
    u1 = (x + w) / float(self._atlas.shape[1])
    v1 = (y + h) / float(self._atlas.shape[0])
    texcoords = (u0, v0, u1, v1)
    glyph.update(dict(size=(w, h), texcoords=texcoords))
|
def member_present(ip, port, balancer_id, profile, **libcloud_kwargs):
    '''Ensure a load balancer member is present.

    :param ip: IP address for the new member
    :type ip: ``str``

    :param port: Port for the new member
    :type port: ``int``

    :param balancer_id: id of a load balancer you want to attach the member to
    :type balancer_id: ``str``

    :param profile: The profile key
    :type profile: ``str``
    '''

    members = __salt__['libcloud_loadbalancer.list_balancer_members'](balancer_id, profile)
    if any(m['ip'] == ip and m['port'] == port for m in members):
        return state_result(True, "Member already present", balancer_id)
    member = __salt__['libcloud_loadbalancer.balancer_attach_member'](balancer_id, ip, port, profile, **libcloud_kwargs)
    return state_result(True, "Member added to balancer, id: {0}".format(member['id']), balancer_id, member)
|
def write_mzxml(filename, df, info=None, precision='f'):
    """Write *df* out as an mzXML file.  Precision is either f or d.

    NOTE(review): this looks like an unfinished stub -- the loop below
    only touches ``df.columns`` and never writes anything to
    ``filename``; confirm before relying on it.
    """

    for r in df.values:
        df.columns
        pass
|
def find(cls, key=None, **kwargs):
    """Find an asset by key.

    E.g.
        shopify.Asset.find('layout/theme.liquid', theme_id=99)
    """

    if not key:
        return super(Asset, cls).find(**kwargs)
    params = {"asset[key]": key}
    params.update(kwargs)
    theme_id = params.get("theme_id")
    if theme_id:
        path_prefix = "%s/themes/%s" % (cls.site, theme_id)
    else:
        path_prefix = cls.site
    resource = cls.find_one("%s/assets.%s" % (path_prefix, cls.format.extension), **params)
    # Remember the theme so later requests stay scoped to it.
    if theme_id and resource:
        resource._prefix_options["theme_id"] = theme_id
    return resource
|
def plot_ell(fignum, pars, col, lower, plot):
    """Calculate (and optionally plot) points on an ellipse about
    Pdec, Pinc with semi-angles beta and gamma.

    Parameters
    ----------
    fignum : matplotlib figure number
    pars : list of [Pdec, Pinc, beta, Bdec, Binc, gamma, Gdec, Ginc]
        where P is the direction, (Bdec, Binc) the beta direction and
        (Gdec, Ginc) the gamma direction
    col : color for the ellipse
    lower : boolean, if True, lower hemisphere projection
    plot : boolean, if False return the points, if True make the plot
    """

    plt.figure(num=fignum)
    rad = old_div(np.pi, 180.)
    Pdec, Pinc, beta, Bdec, Binc, gamma, Gdec, Ginc = pars[0], pars[1], pars[2], pars[3], pars[4], pars[5], pars[6], pars[7]
    # Flip to the antipodal representation when semi-angles exceed 90.
    if beta > 90. or gamma > 90:
        beta = 180. - beta
        gamma = 180. - gamma
        Pdec = Pdec - 180.
        Pinc = -Pinc
    beta, gamma = beta * rad, gamma * rad
    # convert to radians
    X_ell, Y_ell, X_up, Y_up, PTS = [], [], [], [], []
    nums = 201
    xnum = old_div(float(nums - 1.), 2.)
    # set up t matrix
    t = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
    X = pmag.dir2cart((Pdec, Pinc, 1.0))
    # convert to cartesian coordinates
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    # set up rotation matrix t (third column: principal direction)
    t[0][2] = X[0]
    t[1][2] = X[1]
    t[2][2] = X[2]
    X = pmag.dir2cart((Bdec, Binc, 1.0))
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    # first column: beta direction
    t[0][0] = X[0]
    t[1][0] = X[1]
    t[2][0] = X[2]
    X = pmag.dir2cart((Gdec, Ginc, 1.0))
    if lower == 1 and X[2] < 0:
        for i in range(3):
            X[i] = -X[i]
    # second column: gamma direction
    t[0][1] = X[0]
    t[1][1] = X[1]
    t[2][1] = X[2]
    # set up v matrix
    v = [0, 0, 0]
    for i in range(nums):  # incremental point along ellipse
        psi = float(i) * np.pi / xnum
        v[0] = np.sin(beta) * np.cos(psi)
        v[1] = np.sin(gamma) * np.sin(psi)
        v[2] = np.sqrt(1. - v[0] ** 2 - v[1] ** 2)
        elli = [0, 0, 0]
        # calculate points on the ellipse
        for j in range(3):
            for k in range(3):  # cartesian coordinate j of ellipse
                elli[j] = elli[j] + t[j][k] * v[k]
        pts = pmag.cart2dir(elli)
        PTS.append([pts[0], pts[1]])
        # put on an equal area projection
        R = old_div(np.sqrt(1. - abs(elli[2])), (np.sqrt(elli[0] ** 2 + elli[1] ** 2)))
        if elli[2] <= 0:  # for i in range(3): elli[i] = -elli[i]
            X_up.append(elli[1] * R)
            Y_up.append(elli[0] * R)
        else:
            X_ell.append(elli[1] * R)
            Y_ell.append(elli[0] * R)
    if plot == 1:
        # Use a point marker in the ellipse color.
        col = col[0] + '.'
        if X_ell != []:
            plt.plot(X_ell, Y_ell, col, markersize=3)
        if X_up != []:
            plt.plot(X_up, Y_up, col, markersize=3)
    else:
        return PTS
|
def construct(self, request, service=None, http_args=None, **kwargs):
    """Construct a dictionary to be added to the HTTP request headers.

    Builds an HTTP Basic Authorization header from the client id and
    client secret, and prunes ``client_id``/``client_secret`` from the
    request message where appropriate.

    :param request: The request message instance.
    :param service: A :py:class:`oidcservice.service.Service` instance.
    :param http_args: HTTP arguments.
    :return: dictionary of HTTP arguments.
    """

    if http_args is None:
        http_args = {}
    if "headers" not in http_args:
        http_args["headers"] = {}
    # get the username (client_id) and the password (client_secret);
    # explicit keyword arguments win over the request message, which
    # wins over the service context.
    try:
        passwd = kwargs["password"]
    except KeyError:
        try:
            passwd = request["client_secret"]
        except KeyError:
            passwd = service.service_context.client_secret
    try:
        user = kwargs["user"]
    except KeyError:
        user = service.service_context.client_id
    # The credential is username and password concatenated with a ':'
    # in between and then base64 encoded becomes the authentication
    # token.
    credentials = "{}:{}".format(quote_plus(user), quote_plus(passwd))
    authz = base64.urlsafe_b64encode(credentials.encode("utf-8")).decode("utf-8")
    http_args["headers"]["Authorization"] = "Basic {}".format(authz)
    # If client_secret was part of the request message instance remove it
    try:
        del request["client_secret"]
    except (KeyError, TypeError):
        pass
    # If we're doing an access token request with an authorization code
    # then we should add client_id to the request if it's not already
    # there
    if isinstance(request, AccessTokenRequest) and request['grant_type'] == 'authorization_code':
        if 'client_id' not in request:
            try:
                request['client_id'] = service.service_context.client_id
            except AttributeError:
                pass
    else:
        # remove client_id if not required by the request definition
        try:
            _req = request.c_param["client_id"][VREQUIRED]
        except (KeyError, AttributeError):
            _req = False
        # if it's not required remove it
        if not _req:
            try:
                del request["client_id"]
            except KeyError:
                pass
    return http_args
|
def lazy_groups_of(iterator: Iterator[A], group_size: int) -> Iterator[List[A]]:
    """Take an iterator and batch the individual instances into lists of
    the specified size.  The last list may be smaller if there are
    instances left over.
    """

    def _take_group():
        return list(islice(iterator, group_size))

    # ``iter(callable, sentinel)`` keeps yielding groups until the first
    # empty one, i.e. until the underlying iterator is exhausted.
    return iter(_take_group, [])
|
def _make_request(self, uri, method, body, headers=None):
    """Wrap the response and content returned by :mod:`httplib2` into a
    :class:`~webunit2.response.HttpResponse` object.

    ``uri``:
        Absolute URI to the resource.
    ``method``:
        Any supported HTTP method defined in :rfc:`2616`.
    ``body``:
        In the case of POST and PUT requests, this can contain the
        contents of the request.
    ``headers``:
        Dictionary of header values to be sent as part of the request.

    Returns a :class:`~webunit2.response.HttpResponse` object containing
    the request results.
    """

    # BUGFIX: avoid the shared mutable default ``headers={}``; ``None``
    # now stands for "no extra headers" and is normalised here.
    if headers is None:
        headers = {}
    response, content = self._httpobj.request(uri, method=method, body=body, headers=headers)
    return HttpResponse(response, content)
|
def paginate_response(self, queryset, serializers_kwargs=None):
    """Optionally return a paginated response.

    If pagination parameters are provided in the request, a paginated
    response is returned, otherwise the response is not paginated.

    :param queryset: Queryset to serialize (and possibly paginate).
    :param serializers_kwargs: Extra keyword arguments forwarded to the
        serializer (default: none).
    """

    # BUGFIX: avoid the shared mutable default ``serializers_kwargs={}``.
    if serializers_kwargs is None:
        serializers_kwargs = {}
    page = self.paginate_queryset(queryset)
    if page is not None:
        serializer = self.get_serializer(page, many=True, **serializers_kwargs)
        return self.get_paginated_response(serializer.data)
    serializer = self.get_serializer(queryset, many=True, **serializers_kwargs)
    return Response(serializer.data)
|
def analyze(self, scratch, **kwargs):
    """Categorize instances of attempted say and sound synchronization.

    Walks every script in *scratch*, comparing consecutive blocks at the
    same nesting depth, and counts correct, hackish and incorrect
    attempts to synchronize say/think bubbles with sound playback.

    :param scratch: Parsed scratch project to inspect.
    :return: dict with a single 'sound' key mapping to a Counter of
        error categories.
    """

    errors = Counter()
    for script in self.iter_scripts(scratch):
        prev_name, prev_depth, prev_block = '', 0, script.blocks[0]
        gen = self.iter_blocks(script.blocks)
        for name, depth, block in gen:
            # Only adjacent blocks at the same depth can be a sync pair.
            if prev_depth == depth:
                if prev_name in self.SAY_THINK:
                    if name == 'play sound %s until done':
                        if not self.is_blank(prev_block.args[0]):
                            errors += self.check(gen)
                    # TODO: What about play sound?
                elif prev_name in self.SAY_THINK_DURATION and 'play sound %s' in name:
                    errors['1'] += 1
                elif prev_name == 'play sound %s':
                    if name in self.SAY_THINK:
                        errors[self.INCORRECT] += 1
                    elif name in self.SAY_THINK_DURATION:
                        if self.is_blank(block.args[0]):
                            errors[self.ERROR] += 1
                        else:
                            errors[self.HACKISH] += 1
                elif prev_name == 'play sound %s until done' and name in self.ALL_SAY_THINK:
                    if not self.is_blank(block.args[0]):
                        errors[self.INCORRECT] += 1
                # TODO: Should there be an else clause here?
            prev_name, prev_depth, prev_block = name, depth, block
    return {'sound': errors}
|
def __reversed_filter(filterable, filter_, logic_operation='and'):
    """Reverse-filter a DataFrame using filter_ key-value conditions
    joined with *logic_operation*.

    Finds rows where existing filterable columns (and their values) fit
    the filter_ criterion; rows whose ``type`` is ``"__ANY__"`` always
    match.

    :param filterable: DataFrame to filter.
    :param filter_: Mapping of column name -> required value.
    :param logic_operation: 'and' or 'or' -- how conditions combine.
    :return: DataFrame of matching rows (possibly empty).
    """

    condition = []
    try:
        subscribers_for_any = filterable.query('type == "__ANY__"')
    except pd.core.computation.ops.UndefinedVariableError:
        # No ``type`` column at all -- nobody matches unconditionally.
        subscribers_for_any = pd.DataFrame()
    if not filter_:
        return filterable
    for existing_col in filterable:
        for meta_tag, meta_value in filter_.items():
            if meta_tag == existing_col:
                condition.append('{key} == "{value}"'.format(key=meta_tag, value=meta_value))
    try:
        res = filterable.query(" {operation} ".format(operation=logic_operation).join(condition))
    except pd.core.computation.ops.UndefinedVariableError:
        # BUGFIX: ``DataFrame.append`` was removed in pandas 2.0; use
        # ``pd.concat`` for the same result.
        return pd.concat([pd.DataFrame(), subscribers_for_any])
    return pd.concat([res, subscribers_for_any])
|
def schedule_next_job(self):
    """Fetch the next queued job and tell the workers to start it.

    Returns: None
    """

    next_job = self.storage_backend.get_next_scheduled_job()
    # TODO: don't loop over if workers are already all running
    if not next_job:
        logging.debug("No job to schedule right now.")
        return
    start_msg = Message(type=MessageType.START_JOB, message={'job': next_job})
    try:
        self.messaging_backend.send(self.worker_mailbox, start_msg)
        self.storage_backend.mark_job_as_queued(next_job.job_id)
    except Full:
        # Workers busy -- leave the job for a later scheduling pass.
        logging.debug("Worker queue full; skipping scheduling of job {} for now.".format(next_job.job_id))
        return
|
def hacking_todo_format(physical_line, tokens):
    """Check for 'TODO()'.
    OpenStack HACKING guide recommendation for TODO:
    Include your name with TODOs as in "# TODO(termie)"
    Okay: # TODO(sdague)
    H101: # TODO fail
    H101: # TODO
    H101: # TODO(jogo) fail
    Okay: TODO = 5"""
    # TODO(jogo): make the following doctests pass:
    # H101: # TODO(jogo fail
    # H101: # TODO(jogo
    # TODO(jogo): make this check docstrings as well (don't have to be at top
    # of function)
    for token_type, text, start_index, _, _ in tokens:
        if token_type != tokenize.COMMENT:
            continue
        todo_pos = text.find('TODO')
        # A well-formed TODO is immediately followed by '(' — then
        # find('TODO') and find('TODO(') land on the same offset.
        if todo_pos != text.find('TODO('):
            return todo_pos + start_index[1], "H101: Use TODO(NAME)"
|
def is_quoted(position, text):
    """Return True when *position* falls strictly inside a quoted span of *text*."""
    straight = '\"\''
    curly = '“”'

    def same_family(mark_a, mark_b):
        # Marks close each other only when both are straight or both curly.
        if mark_a in straight and mark_b in straight:
            return True
        if mark_a in curly and mark_b in curly:
            return True
        else:
            return False

    def quoted_ranges(source):
        # Small state machine: 0 = outside, 1 = inside, 2 = just saw a
        # closing mark (confirmed only if a separator follows).
        separators = " .,:;-\r\n"
        quote_marks = ['\"', '“', '”', "'"]
        state = 0
        opening = prev = ''
        begin = None
        spans = []
        for idx, ch in enumerate(source + "\n"):
            # NB: initial prev='' satisfies `prev in separators` (substring
            # test), so a quote at position 0 opens a span.
            if state == 0 and ch in quote_marks and prev in separators:
                begin, state, opening = idx, 1, ch
            elif state == 1 and same_family(ch, opening):
                state = 2
            elif state == 2:
                if ch in separators:
                    spans.append((begin + 1, idx - 1))
                    begin = None
                    state = 0
                else:
                    # The mark was internal (e.g. apostrophe); keep scanning.
                    state = 1
            prev = ch
        return spans

    for lo, hi in quoted_ranges(text):
        if lo <= position < hi:
            return True
    return False
|
def update(user=None, conf_file=None, bin_env=None, name=None):
    '''Reload config and add/remove/update as necessary
    user
        user to run supervisorctl as
    conf_file
        path to supervisord config file
    bin_env
        path to supervisorctl bin or path to virtualenv with supervisor
        installed
    name
        name of the process group to update. if none then update any
        process group that has changes
    CLI Example:
    .. code-block:: bash
        salt '*' supervisord.update'''
    # supervisorctl addresses process groups without the trailing ':' or
    # ':*' that users commonly copy from `status` output — strip either form.
    if isinstance(name, string_types):
        if name.endswith(':'):
            name = name[:-1]
        elif name.endswith(':*'):
            name = name[:-2]
    ret = __salt__['cmd.run_all'](_ctl_cmd('update', name, conf_file, bin_env), runas=user, python_shell=False, )
    return _get_return(ret)
|
def from_array(array):
    """Deserialize a new InlineKeyboardMarkup from a given dictionary.
    :return: new InlineKeyboardMarkup instance.
    :rtype: InlineKeyboardMarkup"""
    # Treat None and empty payloads alike: nothing to deserialize.
    if array is None or not array:
        return None
    # end if
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardButton
    data = {}
    # 'inline_keyboard' is a list of button rows (list of lists), hence
    # list_level=2.
    data['inline_keyboard'] = InlineKeyboardButton.from_array_list(array.get('inline_keyboard'), list_level=2)
    instance = InlineKeyboardMarkup(**data)
    # Keep the raw payload for debugging / round-tripping.
    instance._raw = array
    return instance
|
def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.
    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored. (XXX They should
    probably be diagnosed.)"""
    # Drop query string and fragment before resolving the path.
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = posixpath.normpath(urllib_parse.unquote(path))
    parts = [p for p in path.split('/') if p]
    result = os.getcwd()
    for part in parts:
        # Discard drive letters and directory prefixes so a request can
        # never address components outside the serving directory.
        _drive, part = os.path.splitdrive(part)
        _head, part = os.path.split(part)
        if part in (os.curdir, os.pardir):
            continue
        result = os.path.join(result, part)
    return result
|
def formfield_for_dbfield(self, db_field, **kwargs):
    """Force the slug field to be filled in (and drop its help text)."""
    if db_field.name == "slug":
        kwargs.update(required=True, help_text=None)
    return super(LinkAdmin, self).formfield_for_dbfield(db_field, **kwargs)
|
def save_results(self, path='.'):
    """Pickle ``self.results`` to disk.

    :param path: Destination. Either a directory — a ``results.pkl`` file is
        created inside it, which makes the default ``'.'`` actually usable —
        or an explicit file path (previous behaviour, unchanged).
    """
    # BUG FIX: the old code passed the default '.' straight to open(),
    # which always fails because '.' is a directory.
    if os.path.isdir(path):
        path = os.path.join(path, 'results.pkl')
    with open(path, 'wb') as f:
        pickle.dump(self.results, f)
|
def add(self, key):
    """Insert *key* into the scalable filter.

    Returns True when the key was already present, False when it was newly
    added.
    >>> b = ScalableBloomFilter(initial_capacity=100, error_rate=0.001, mode=ScalableBloomFilter.SMALL_SET_GROWTH)
    >>> b.add("hello")
    False
    >>> b.add("hello")
    True"""
    if key in self:
        return True
    # Pick the newest sub-filter, growing the chain when none exists yet
    # or the current one is full.
    if not self.filters:
        current = BloomFilter(capacity=self.initial_capacity, error_rate=self.error_rate * (1.0 - self.ratio))
        self.filters.append(current)
    else:
        current = self.filters[-1]
        if current.count >= current.capacity:
            current = BloomFilter(capacity=current.capacity * self.scale, error_rate=current.error_rate * self.ratio)
            self.filters.append(current)
    # Membership was already checked above, so skip the redundant check.
    current.add(key, skip_check=True)
    return False
|
def w(self):
    """int: The width of the texture in pixels."""
    # Out-parameter for SDL_QueryTexture; only the width slot is requested
    # (format, access and height are passed as NULL).
    w = ffi.new('int *')
    check_int_err(lib.SDL_QueryTexture(self._ptr, ffi.NULL, ffi.NULL, w, ffi.NULL))
    return w[0]
|
def android_setup_view(request):
    """Set up a GCM session.
    This does *not* require a valid login session. Instead, a token from the client
    session is sent to the Android backend, which queries a POST request to this view.
    The "android_gcm_rand" is randomly set when the Android app is detected through
    the user agent. If it has the same value, it is assumed to be correct."""
    logger.debug(request.POST)
    if request.method == "POST":
        if "user_token" in request.POST and "gcm_token" in request.POST:
            user_token = request.POST.get("user_token")
            gcm_token = request.POST.get("gcm_token")
            logger.debug(user_token)
            logger.debug(gcm_token)
            # The random token pairs this request with the browser session
            # that initiated registration.
            try:
                ncfg = NotificationConfig.objects.get(android_gcm_rand=user_token)
            except NotificationConfig.DoesNotExist:
                logger.debug("No pair")
                return HttpResponse('{"error":"Invalid data."}', content_type="text/json")
            # Store the GCM token and clear the one-time pairing values so
            # the random token cannot be replayed.
            ncfg.gcm_token = gcm_token
            ncfg.android_gcm_rand = None
            ncfg.android_gcm_date = None
            ncfg.save()
            return HttpResponse('{"success":"Now registered."}', content_type="text/json")
    # Non-POST requests and POSTs missing either token fall through to here.
    return HttpResponse('{"error":"Invalid arguments."}', content_type="text/json")
|
def get_kernelspec(self, name):
    """Return the kernel specification for *name* as a plain dictionary."""
    spec = KernelSpecManager().get_kernel_spec(name).to_dict()
    spec['name'] = name
    # Callers only need the declarative spec, not the launch command line.
    spec.pop('argv')
    return spec
|
def get_option_lists(self):
    """A hook to override the option lists used to generate option names
    and defaults."""
    lists = [self.get_option_list()]
    # Each option group is a (name, description, option_list) triple; only
    # the option list itself contributes here.
    for _name, _description, option_list in self.get_option_groups():
        lists.append(option_list)
    return lists
|
def splitText(text):
    """Split text into sub segments of size not bigger than MAX_SEGMENT_SIZE."""
    # NOTE: the zero-argument __class__ reference implies this function is
    # defined inside a class body (implicit closure cell).
    segments = []
    remaining_text = __class__.cleanSpaces(text)
    while len(remaining_text) > __class__.MAX_SEGMENT_SIZE:
        cur_text = remaining_text[:__class__.MAX_SEGMENT_SIZE]
        # Prefer the most natural break, trying progressively weaker
        # separators: punctuation, then whitespace, then any
        # non-alphanumeric character, then a hard cut.
        # try to split at punctuation
        split_idx = __class__.findLastCharIndexMatching(cur_text,  # https://en.wikipedia.org/wiki/Unicode_character_property#General_Category
                                                        lambda x: unicodedata.category(x) in ("Ps", "Pe", "Pi", "Pf", "Po"))
        if split_idx is None:
            # try to split at whitespace
            split_idx = __class__.findLastCharIndexMatching(cur_text, lambda x: unicodedata.category(x).startswith("Z"))
        if split_idx is None:
            # try to split at anything not a letter or number
            split_idx = __class__.findLastCharIndexMatching(cur_text, lambda x: not (unicodedata.category(x)[0] in ("L", "N")))
        if split_idx is None:
            # split at the last char
            split_idx = __class__.MAX_SEGMENT_SIZE - 1
        new_segment = cur_text[:split_idx + 1].rstrip()
        segments.append(new_segment)
        # Drop leading whitespace/punctuation so the next segment starts clean.
        remaining_text = remaining_text[split_idx + 1:].lstrip(string.whitespace + string.punctuation)
    if remaining_text:
        segments.append(remaining_text)
    return segments
|
def dcc_event(regexp, callback=None, iotype='in', venusian_category='irc3.dcc'):
    """Work like :class:`~irc3.dec.event` but occurs during DCC CHATs"""
    # A DCC event is an ordinary event routed on a 'dcc_'-prefixed channel.
    dcc_iotype = 'dcc_' + iotype
    return event(regexp, callback=callback, iotype=dcc_iotype, venusian_category=venusian_category)
|
def remove_role_from_user(self, user, role):
    """Removes a role from a user.
    :param user: The user to manipulate
    :param role: The role to remove from the user
    :return: True when the role was present and removed, else False
    """
    user, role = self._prepare_role_modify_args(user, role)
    if role not in user.roles:
        return False
    user.roles.remove(role)
    # Persist only when something actually changed.
    self.put(user)
    return True
|
def run(self):
    """Creates a new replica using WAL-E
    Returns
    ExitCode
        0 = Success
        1 = Error, try again
        2 = Error, don't try again"""
    # env_dir was missing when this object was built; nothing can be done.
    if self.init_error:
        logger.error('init error: %r did not exist at initialization time', self.wal_e.env_dir)
        return ExitCode.FAIL
    try:
        should_use_s3 = self.should_use_s3_to_create_replica()
        if should_use_s3 is None:  # Need to retry
            return ExitCode.RETRY_LATER
        elif should_use_s3:
            return self.create_replica_with_s3()
        elif not should_use_s3:
            # NOTE(review): an explicit False answer is treated as a failure
            # here — there is no non-S3 fallback in this method; confirm that
            # is intentional.
            return ExitCode.FAIL
    except Exception:
        # Never let an unexpected error escape; report failure instead.
        logger.exception("Unhandled exception when running WAL-E restore")
        return ExitCode.FAIL
|
def end(self):
    """Flush the collected per-comic data to individual JSON files."""
    for comic, comic_data in self.data.items():
        # One file per comic, path chosen by jsonFn().
        with codecs.open(self.jsonFn(comic), 'w', self.encoding) as f:
            json.dump(comic_data, f, indent=2, separators=(',', ': '), sort_keys=True)
|
def get_grouped(self, go_ntsets, go_all, gosubdag, **kws):
    """Get Grouped object."""
    # Forward only the keyword args that Grouped declares it understands.
    kws_grpd = {k: v for k, v in kws.items() if k in Grouped.kws_dict}
    # Attach the per-GO-term presence namedtuples computed from the sets.
    kws_grpd['go2nt'] = self._init_go2ntpresent(go_ntsets, go_all, gosubdag)
    return Grouped(gosubdag, self.godag.version, **kws_grpd)
|
def minimizePowell(objectiveFunction, parameter_guess, verbose=False):
    '''Minimizes the objective function using a derivative-free Powell algorithm,
    starting from an initial parameter guess.
    Parameters
    ----------
    objectiveFunction : function
        The function to be minimized. It should take only a single argument, which
        should be a list representing the parameters to be estimated.
    parameter_guess : [float]
        A starting point for the Powell algorithm, which must be a valid
        input for objectiveFunction.
    verbose : boolean
        A flag for the amount of output to print.
    Returns
    -------
    xopt : [float]
        The values that minimize objectiveFunction.'''
    start_time = time()
    # full_output=1 yields (xopt, fopt, direc, iter, funcalls, warnflag).
    result = fmin_powell(objectiveFunction, parameter_guess, full_output=1, maxiter=1000, disp=verbose)
    end_time = time()
    xopt, fopt, direc, optiter, funcalls, warnflag = result
    # warnflag: 1 = max function evaluations reached, 2 = max iterations.
    if warnflag != 0:
        warnings.warn("Minimization failed! xopt=" + str(xopt) + ', fopt=' + str(fopt) + ', direc=' + str(direc) + ', optiter=' + str(optiter) + ', funcalls=' + str(funcalls) + ', warnflag=' + str(warnflag))
    if verbose:
        print("Time to estimate is " + str(end_time - start_time) + " seconds.")
    return xopt
|
def get_one_dimensional_kernel(self, dim):
    """Specially intended for Grid regression."""
    # Build a 1-D RBF kernel sharing this kernel's variance; `dim` records
    # how many dimensions the original (full) kernel covered.
    oneDkernel = GridRBF(input_dim=1, variance=self.variance.copy(), originalDimensions=dim)
    return oneDkernel
|
def axml(input_, output, file_, resource):
    """Parse the AndroidManifest.xml.
    Parsing is either direct or from a given APK and prints in XML format or
    saves to file.
    This tool can also be used to process any AXML encoded file, for example
    from the layout directory.
    Example:
        $ androguard axml AndroidManifest.xml"""
    # Exactly one of the two input sources must be supplied.
    if file_ is not None and input_ is not None:
        print("Can not give --input and positional argument! " "Please use only one of them!")
        sys.exit(1)
    if file_ is None and input_ is None:
        print("Give one file to decode!")
        sys.exit(1)
    # After the guards above exactly one source is set; forward it.
    source = file_ if file_ is not None else input_
    androaxml_main(source, output, resource)
|
def pool(builder, size, timeout=None):
    """Create a pool that imposes a limit on the number of stored
    instances.
    Args:
        builder: a function to build an instance.
        size: the size of the pool.
        timeout (Optional[float]): the seconds to wait before raising
            a ``queue.Empty`` exception if no instances are available
            within that time.
    Raises:
        If ``timeout`` is defined but the request is taking longer
        than the specified time, the context manager will raise
        a ``queue.Empty`` exception.
    Returns:
        A context manager that can be used with the ``with``
        statement.
    """
    lock = threading.Lock()
    local_pool = queue.Queue()
    current_size = 0

    @contextlib.contextmanager
    def pooled():
        nonlocal current_size
        instance = None
        # If we still have free slots, then we have room to create new
        # instances.
        if current_size < size:
            with lock:
                # We need to check again if we have slots available, since
                # the situation might be different after acquiring the lock
                if current_size < size:
                    current_size += 1
                    instance = builder()
        # Watchout: current_size can be equal to size if the previous part of
        # the function has been executed, that's why we need to check if the
        # instance is None.
        if instance is None:
            instance = local_pool.get(timeout=timeout)
        # BUG FIX: the instance must be returned to the pool even when the
        # `with` body raises; otherwise the slot leaks and subsequent
        # acquisitions block or raise queue.Empty forever.
        try:
            yield instance
        finally:
            local_pool.put(instance)
    return pooled
|
def set_ref(self, ref_key, ref_id):
    """Using a ref key and ref id set the
    reference to the appropriate resource type."""
    # Map each recognised key to the attribute that stores its id.
    attr_by_key = {
        'NETWORK': 'network_id',
        'NODE': 'node_id',
        'LINK': 'link_id',
        'GROUP': 'group_id',
        'SCENARIO': 'scenario_id',
        'PROJECT': 'project_id',
    }
    try:
        attr_name = attr_by_key[ref_key]
    except KeyError:
        raise HydraError("Ref Key %s not recognised." % ref_key)
    setattr(self, attr_name, ref_id)
|
def marketplace(self):
    """:returns: Version marketplace of preview
    :rtype: twilio.rest.preview.marketplace.Marketplace"""
    # Lazily construct and memoize the Marketplace version object.
    cached = self._marketplace
    if cached is None:
        cached = self._marketplace = Marketplace(self)
    return cached
|
def dict_to_schema(schema_dict, required, allow_custom_keys=True, modifier=None):
    """Convert a dict of Schemas into a Schema.
    Args:
        required (bool): Whether to make schema keys optional or required.
        allow_custom_keys (bool, optional): If True, creates a schema that
            allows custom items in dicts.
        modifier (callable): Functor to apply to dict values - it is applied
            via `Schema.Use`.
    Returns:
        A `Schema` object.
    """
    # Wrap the modifier once so all nested levels reuse the same Use object.
    if modifier:
        modifier = Use(modifier)

    def _to(value):
        # Recurse into dicts, converting each string key to
        # Required/Optional; other values become schemas directly.
        if isinstance(value, dict):
            d = {}
            for k, v in value.iteritems():  # NOTE: Python 2 dict API
                if isinstance(k, basestring):
                    k = Required(k) if required else Optional(k)
                d[k] = _to(v)
            if allow_custom_keys:
                # Accept arbitrary extra string keys, validated by the
                # modifier when one was given.
                d[Optional(basestring)] = modifier or object
            schema = Schema(d)
        elif modifier:
            schema = And(value, modifier)
        else:
            schema = value
        return schema
    return _to(schema_dict)
|
def startConnection(self, point, cls=None, output=True):
    """Starts creating a new connection from the given output point. If a connection class is not provided, then the defaultConnectionClass will be used.
    :param point <QPointF>
    cls subclass of <XNodeConnection>
    output <bool>"""
    # Abort any connection still being dragged before starting a new one.
    self.finishConnection(False)
    connection = self.addConnection(cls)
    self._activeConnection = connection
    if connection:
        # Both endpoints start at the anchor; the free end follows the
        # mouse until the connection is finished.
        connection.setOutputPoint(point)
        connection.setInputPoint(point)
        connection.setCustomData('__output__', output)
    return connection
|
def _get_version_for_class_from_state(state, klass):
    """retrieves the version of the current klass from the state mapping from old locations to new ones."""
    # klass may have been renamed, so we have to look this up in the class rename registry.
    names = [_importable_name(klass)]
    # lookup old names, handled by current klass.
    from .util import class_rename_registry
    names.extend(class_rename_registry.old_handled_by(klass))
    # First hit wins: current name takes precedence over historical ones.
    for n in names:
        try:
            return state['class_tree_versions'][n]
        except KeyError:
            continue
    # if we did not find a suitable version number return infinity.
    if _debug:
        logger.debug('unable to obtain a __serialize_version for class %s', klass)
    return float('inf')
|
def vars_to_array(self):
    """Convert `self.vars` to a numpy array
    Returns
    -------
    numpy.array
        Transposed variable matrix, or None when `self.vars` is empty.
    """
    # Logger.warn is a deprecated alias of Logger.warning — use the
    # canonical spelling (message unchanged).
    logger.warning('This function is deprecated. You can inspect `self.np_vars` directly as NumPy arrays ' 'without conversion.')
    if not self.vars:
        return None
    # Stack the stored column vectors side by side, then transpose so each
    # row holds one snapshot of the variables.
    vars_matrix = matrix(self.vars, size=(self.vars[0].size[0], len(self.vars))).trans()
    self.vars_array = np.array(vars_matrix)
    return self.vars_array
|
def hash_pair(first: Keccak256, second: Optional[Keccak256]) -> Keccak256:
    """Computes the keccak hash of the elements ordered topologically.
    Since a merkle proof will not include all the elements, but only the path
    starting from the leaves up to the root, the order of the elements is not
    known by the proof checker. The topological order is used as a
    deterministic way of ordering the elements making sure the smart contract
    verification and the python code are compatible."""
    assert first is not None
    # A lone node (odd leaf) is propagated unchanged.
    if second is None:
        return first
    # Hash the smaller value first so both sides agree on the order.
    smaller, larger = (second, first) if first > second else (first, second)
    return sha3(smaller + larger)
|
def delayed_bamprep_merge(samples, run_parallel):
    """Perform a delayed merge on regional prepared BAM files."""
    # Only merge when at least one sample carries a "combine" directive.
    needs_merge = any("combine" in data[0] for data in samples)
    if needs_merge:
        return run_parallel("delayed_bam_merge", samples)
    return samples
|
def get_aux_files(basename):
    """Look for and return all the aux files that are associated witht this filename.
    Will look for:
    background (_bkg.fits)
    rms (_rms.fits)
    mask (.mim)
    catalogue (_comp.fits)
    psf map (_psf.fits)
    will return filenames if they exist, or None where they do not.
    Parameters
    ----------
    basename : str
        The name/path of the input image.
    Returns
    -------
    aux : dict
        Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
    """
    base = os.path.splitext(basename)[0]
    candidates = {
        "bkg": base + "_bkg.fits",
        "rms": base + "_rms.fits",
        "mask": base + ".mim",
        "cat": base + "_comp.fits",
        "psf": base + "_psf.fits",
    }
    # Replace any candidate that is not actually on disk with None.
    return {key: (path if os.path.exists(path) else None) for key, path in candidates.items()}
|
def reindex(self, fq=[], **kwargs):
    '''Starts Reindexing Process. All parameter arguments will be passed down to the getter function.
    :param string fq: FilterQuery to pass to source Solr to retrieve items. This can be used to limit the results.'''
    # NOTE(review): mutable default `fq=[]` is shared across calls; it is
    # only read here, but consider `fq=None` if a callee ever mutates it.
    for items in self._getter(fq=fq, **kwargs):
        self._putter(items)
    # Commit only when the destination is a Solr collection (the putter may
    # also target something that has no commit semantics).
    if type(self._dest) is SolrClient and self._dest_coll:
        self.log.info("Finished Indexing, sending a commit")
        self._dest.commit(self._dest_coll, openSearcher=True)
|
def attribute_crawl(self, key):
    """Grab all attribute values associated with the given feature.
    Traverse the given feature (and all of its descendants) to find all
    values associated with the given attribute key.
    >>> import tag
    >>> reader = tag.GFF3Reader(tag.pkgdata('otau-no-seqreg.gff3'))
    >>> features = tag.select.features(reader)
    >>> for feature in features:
    ...     names = feature.attribute_crawl('Name')
    ...     print(sorted(list(names)))
    ['Ot01g00060', 'XM_003074019.1', 'XP_003074065.1']
    ['Ot01g00070', 'XM_003074020.1', 'XP_003074066.1']
    ['Ot01g00080', 'XM_003074021.1', 'XP_003074067.1']
    ['Ot01g00090', 'XM_003074022.1', 'XP_003074068.1']
    ['Ot01g00100', 'XM_003074023.1', 'XP_003074069.1']
    ['Ot01g00110', 'XM_003074024.1', 'XP_003074070.1']"""
    union = set()
    # Iterating self visits this feature and every descendant.
    for node in self:
        node_values = node.get_attribute(key, as_list=True)
        if node_values is not None:
            union.update(node_values)
    return union
|
def role_list():
    '''List all available roles
    CLI Example:
    .. code-block:: bash
        salt '*' rbac.role_list'''
    roles = {}
    ## read user_attr file (user:qualifier:res1:res2:attr)
    with salt.utils.files.fopen('/etc/user_attr', 'r') as user_attr:
        for role in user_attr:
            role = salt.utils.stringutils.to_unicode(role)
            role = role.split(':')
            # skip comments and non complaint lines
            if len(role) != 5:
                continue
            # parse attr
            attrs = {}
            for attr in role[4].split(';'):
                # NOTE(review): an attr entry without '=' raises ValueError
                # here — assumes well-formed user_attr lines; confirm.
                attr_key, attr_val = attr.split('=')
                if attr_key in ['auths', 'profiles', 'roles']:
                    # multi-valued attributes are comma separated
                    attrs[attr_key] = attr_val.split(',')
                else:
                    attrs[attr_key] = attr_val
            role[4] = attrs
            # add role info to dict
            # only entries typed as 'role' are kept (users share this file)
            if 'type' in role[4] and role[4]['type'] == 'role':
                del role[4]['type']
                roles[role[0]] = role[4]
    return roles
|
def by_occupied_housing_units(self, lower=-1, upper=2 ** 31, zipcode_type=ZipcodeType.Standard, sort_by=SimpleZipcode.occupied_housing_units.name, ascending=False, returns=DEFAULT_LIMIT):
    """Search zipcode information by occupied house of units.

    :param lower: lower bound for occupied housing units.
    :param upper: upper bound for occupied housing units.
    :param zipcode_type: restrict results to this zipcode type.
    :param sort_by: column to sort on (defaults to occupied_housing_units).
    :param ascending: sort direction.
    :param returns: maximum number of results.
    """
    # Thin wrapper: all filtering/sorting is delegated to the generic query().
    return self.query(occupied_housing_units_lower=lower, occupied_housing_units_upper=upper, sort_by=sort_by, zipcode_type=zipcode_type, ascending=ascending, returns=returns, )
|
def _spark_streaming_statistics_metrics(self, instance, running_apps, addl_tags, requests_config):
    """Get metrics for each application streaming statistics."""
    # running_apps maps app_id -> (app_name, tracking_url).
    for app_id, (app_name, tracking_url) in iteritems(running_apps):
        try:
            base_url = self._get_request_url(instance, tracking_url)
            response = self._rest_request_to_json(base_url, SPARK_APPS_PATH, SPARK_SERVICE_CHECK, requests_config, addl_tags, app_id, 'streaming/statistics', )
            self.log.debug('streaming/statistics: %s', response)
            # Tag every metric with the application name plus caller tags.
            tags = ['app_name:%s' % str(app_name)]
            tags.extend(addl_tags)
            # NOTE: response is a dict
            self._set_metrics_from_json(tags, response, SPARK_STREAMING_STATISTICS_METRICS)
        except HTTPError as e:
            # NOTE: If api call returns response 404
            # then it means that the application is not a streaming application, we should skip metric submission
            if e.response.status_code != 404:
                raise
|
def _from_func ( cls , f , * args , _attr_scts = None , ** kwargs ) :
"""Creates a function chain starting with the specified SCT ( f ) , and its arguments ."""
|
func_chain = cls ( attr_scts = _attr_scts )
func_chain . _stack . append ( [ f , args , kwargs ] )
return func_chain
|
def arg_comparitor(name):
    """:param arg name
    :return: pair containing name, comparitor
    given an argument name, munge it and return a proper comparitor
    >>> arg_comparitor("a")
    a, operator.eq
    >>> arg_comparitor("a__in")
    a, operator.contains"""
    # All recognised suffixes are exactly four characters long, so a single
    # table lookup on the tail replaces the endswith() chain.
    suffix_ops = {
        "__in": contains,
        "__ge": ge,
        "__gt": gt,
        "__le": le,
        "__lt": lt,
        "__eq": eq,
        "__ne": ne,
    }
    op = suffix_ops.get(name[-4:])
    if op is not None:
        return name[:-4], op
    return name, eq
|
def rm(device, minor):  # pylint: disable=C0103
    '''Removes the partition with number <minor>.
    CLI Example:
    .. code-block:: bash
        salt '*' partition.rm /dev/sda 5'''
    _validate_device(device)
    # parted accepts only numeric minors; fail early with a clear error
    # instead of passing garbage to the shell command.
    try:
        int(minor)
    except Exception:
        raise CommandExecutionError('Invalid minor number passed to partition.rm')
    cmd = 'parted -m -s {0} rm {1}'.format(device, minor)
    out = __salt__['cmd.run'](cmd).splitlines()
    return out
|
def calculate_bin_widths(edges):
    """Calculate the widths of wavelengths bins given their edges.
    Parameters
    ----------
    edges : array_like
        Sequence of bin edges. Must be 1D and have at least two values.
    Returns
    -------
    widths : ndarray
        Array of bin widths. Will be 1D and have one less value than ``edges``.
    """
    edges = np.asanyarray(edges)
    if edges.ndim != 1:
        raise ValueError('edges input array must be 1D.')
    if edges.size < 2:
        raise ValueError('edges input must have at least two values.')
    # Width of each bin is the difference between consecutive edges.
    return np.diff(edges)
|
def strip_html(value, allowed_tags=[], allowed_attributes=[], allowed_styles=[]):
    """Template tag to strip html from string values. It accepts lists of
    allowed tags, attributes or stylesin comma separated string or list format.
    For example:
    {% load sanitizer %}
    {% strip_html '<a href="">bar</a><script>alert('baz')</script>' "a,img' 'href', src' %}
    Will output:
    <a href="">bar</a> alert('baz');
    On django 1.4 you could also use keyword arguments:
    {% strip_html '<a href="">bar</a>' allowed_tags="a,img' allowed_attributes='href', src' %}"""
    # Only sanitize string values; anything else (None, numbers, already-safe
    # objects) is returned untouched.  NOTE: `basestring` makes this
    # Python 2 specific.
    if isinstance(value, basestring):
        value = bleach.clean(value, tags=allowed_tags, attributes=allowed_attributes, styles=allowed_styles, strip=True)
    return value
|
def compress_file(filepath, compression="gz"):
    """Compresses a file with the correct extension. Functions like standard
    Unix command line gzip and bzip2 in the sense that the original
    uncompressed files are not retained.
    Args:
        filepath (str): Path to file.
        compression (str): A compression mode. Valid options are "gz" or
            "bz2". Defaults to "gz".
    """
    if compression not in ["gz", "bz2"]:
        raise ValueError("Supported compression formats are 'gz' and 'bz2'.")
    from monty.io import zopen
    # Files already carrying the target extension are assumed compressed and
    # left untouched; otherwise write `<file>.<ext>` and delete the original.
    if not filepath.lower().endswith(".%s" % compression):
        with open(filepath, 'rb') as f_in, zopen('%s.%s' % (filepath, compression), 'wb') as f_out:
            f_out.writelines(f_in)
        os.remove(filepath)
|
def hash_file(fileobj):
    """Return the hex MD5 digest of *fileobj*'s remaining content.

    :param fileobj: a file object
    :return: a hash of the file content"""
    digest = hashlib.md5()
    # Read in 64 KiB chunks so arbitrarily large files stay cheap on memory.
    while True:
        chunk = fileobj.read(65536)
        if not chunk:
            break
        digest.update(chunk)
    return digest.hexdigest()
|
def clear_end_date(self):
    """Clears the end date.
    raise: NoAccess - ``Metadata.isRequired()`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*"""
    # A read-only or required end date may not be cleared.
    if (self.get_end_date_metadata().is_read_only() or self.get_end_date_metadata().is_required()):
        raise NoAccess()
    # "Clearing" restores the metadata-declared default date value.
    self.my_osid_object_form._my_map['endDate'] = DateTime(**self._end_date_metadata['default_date_time_values'][0])
|
def main():
    """The entry point for the Hyper-V Neutron Agent."""
    # Order matters: options must be registered before config parsing, and
    # logging must be configured before the agent is constructed.
    neutron_config.register_agent_state_opts_helper(CONF)
    common_config.init(sys.argv[1:])
    neutron_config.setup_logging()
    hyperv_agent = HyperVNeutronAgent()
    # Start everything.
    LOG.info("Agent initialized successfully, now running... ")
    hyperv_agent.daemon_loop()
|
def variants(self, case_id, skip=0, count=1000, filters=None):
    """Fetch variants for a case."""
    filters = filters or {}
    logger.debug("Fetching case with case_id: {0}".format(case_id))
    case_obj = self.case(case_id)
    # The plugin may rewrite case_id into its internal representation.
    plugin, case_id = self.select_plugin(case_obj)
    self.filters = plugin.filters
    # Resolve every requested gene list into one flat set of gene ids.
    gene_lists = (self.gene_list(list_id) for list_id in filters.get('gene_lists', []))
    nested_geneids = (gene_list.gene_ids for gene_list in gene_lists)
    gene_ids = set(itertools.chain.from_iterable(nested_geneids))
    # Merge with any explicitly requested gene ids.
    # NOTE(review): when 'gene_ids' pre-exists it stays a list (extended with
    # set members), otherwise it becomes a set — presumably the plugin
    # accepts either; confirm.
    if filters.get('gene_ids'):
        filters['gene_ids'].extend(gene_ids)
    else:
        filters['gene_ids'] = gene_ids
    variants = plugin.variants(case_id, skip, count, filters)
    return variants
|
def get_plate_list(self, market, plate_class):
    """Get the sub-plate list under a plate class.

    :param market: market identifier (see Market). Note: SH and SZ are not
        distinguished here — either value returns sub-plates for the combined
        SH+SZ market (matching client behaviour).
    :param plate_class: plate classification (see Plate).
    :return: (RET_OK, DataFrame) on success, with columns:
             code (str) stock code, plate_name (str), plate_id (str);
             (RET_ERROR, error message string) on failure.
    """
    param_table = {'market': market, 'plate_class': plate_class}
    for param_name, param in param_table.items():
        # BUG FIX: the original loop checked `is_str(market)` for every
        # entry, so `plate_class` was never actually type-checked and the
        # error message always blamed `market`. Validate each value.
        if param is None or is_str(param) is False:
            error_str = ERROR_STR_PREFIX + "the type of %s param is wrong" % param_name
            return RET_ERROR, error_str
    if market not in MKT_MAP:
        error_str = ERROR_STR_PREFIX + "the value of market param is wrong "
        return RET_ERROR, error_str
    if plate_class not in PLATE_CLASS_MAP:
        error_str = ERROR_STR_PREFIX + "the class of plate is wrong"
        return RET_ERROR, error_str
    query_processor = self._get_sync_query_processor(SubplateQuery.pack_req, SubplateQuery.unpack_rsp)
    kargs = {'market': market, 'plate_class': plate_class, 'conn_id': self.get_sync_conn_id()}
    ret_code, msg, subplate_list = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg
    col_list = ['code', 'plate_name', 'plate_id']
    subplate_frame_table = pd.DataFrame(subplate_list, columns=col_list)
    return RET_OK, subplate_frame_table
|
def contraction_sharp(Di1, Di2):
    r'''Returns loss coefficient for any sharp edged pipe contraction
    as shown in [1]_.
    .. math::
        K = 0.0696(1 - \beta^5)\lambda^2 + (\lambda - 1)^2
    .. math::
        \lambda = 1 + 0.622(1 - 0.215\beta^2 - 0.785\beta^5)
    .. math::
        \beta = d_2/d_1
    .. figure:: fittings/contraction_sharp.png
        :scale: 40 %
        :alt: Sharp contraction; after [1]_
    Parameters
    ----------
    Di1 : float
        Inside diameter of original pipe, [m]
    Di2 : float
        Inside diameter of following pipe, [m]
    Returns
    -------
    K : float
        Loss coefficient in terms of the following pipe [-]
    Notes
    -----
    A value of 0.506 or simply 0.5 is often used.
    Examples
    --------
    >>> contraction_sharp(Di1=1, Di2=0.4)
    0.5301269161591805
    References
    ----------
    .. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
       and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.'''
    # Diameter ratio of the downstream to upstream pipe.
    beta = Di2 / Di1
    lam = 1 + 0.622 * (1 - 0.215 * beta ** 2 - 0.785 * beta ** 5)
    contraction_term = 0.0696 * (1 - beta ** 5) * lam ** 2
    return contraction_term + (lam - 1) ** 2
|
def checkStatus(self):
    """Check the session status against the server.

    Returns:
        True: success reported by the server
        False: any other <message> value
    """
    checkAccount()
    payload = {'userid': self.user_id, 'useridx': self.useridx}
    response = self.session.post(nurls['checkStatus'], data=payload)
    # The server answers with an XML-ish <message>...</message> payload.
    pattern = re.compile(r'\<message\>(?P<message>.+)\</message\>')
    status_message = pattern.search(response.text).group('message')
    return status_message == 'success'
|
def load_cli(subparsers):
    """Given a parser, load the CLI subcommands"""
    for command_name in available_commands():
        # Each subcommand lives in its own module inside this package.
        module = '{}.{}'.format(__package__, command_name)
        loader, description = _import_loader(module)
        parser = subparsers.add_parser(command_name, description=description)
        command = loader(parser)
        if command is None:
            raise RuntimeError('Failed to load "{}".'.format(command_name))
        # Stash the callable so the dispatcher can invoke args.cmmd later.
        parser.set_defaults(cmmd=command)
|
def read_pkl_and_pklz ( filename ) :
    """Read the raw bytes of a pickle file that may or may not be gzipped.

    First tries to read *filename* as a gzip archive; if that fails with an
    IOError/OSError (i.e. the file is not gzip-compressed), falls back to
    reading it as a plain binary file. Any other exception is logged and
    swallowed, preserving the original best-effort contract.

    :param filename: Path to the (possibly gzipped) pickle file.
    :return: File content as bytes, or None when reading failed entirely.
    """
    fcontent = None
    try :
        import gzip
        # ``with`` guarantees the handle is closed even when read() raises,
        # fixing a leak in the previous implementation.
        with gzip . open ( filename , 'rb' ) as f :
            fcontent = f . read ( )
    except IOError as e : # if the problem is in not gzip file
        logger . info ( "Input gzip exception: " + str ( e ) )
        with open ( filename , 'rb' ) as f :
            fcontent = f . read ( )
    except Exception as e : # other problem
        import traceback
        logger . error ( "Input gzip exception: " + str ( e ) )
        logger . error ( traceback . format_exc ( ) )
    return fcontent
|
def index2bool ( index , length = None ) :
    """Build a boolean mask with True at each position listed in ``index``.

    :param index: Integer array of positions to set True.
    :type index: ndarray(type=int)
    :param length: Length of the returned mask; inferred as
        ``index.max() + 1`` when None.
    :type length: int or None
    :returns: Boolean array with True at the given index positions.
    :rtype: ndarray

    .. seealso:: :func:`bool2index`
    """
    if length is None :
        # Cannot infer a length from an empty index -- return an empty mask.
        if index . shape [ 0 ] == 0 :
            return np . zeros ( 0 , dtype = bool )
        length = index . max ( ) + 1
    mask = np . zeros ( length , dtype = bool )
    mask [ index ] = True
    return mask
|
def move ( self , folder , ** kwargs ) :
    '''Move the associated remote object to *folder*.

    :param folder: Folder route to which to move the object
    :type folder: string
    :raises: :exc:`~dxpy.exceptions.DXError` if no project is associated
        with the object
    '''
    project = self . _proj
    if project is None :
        raise DXError ( "Move called when a project ID was not associated with this object handler" )
    request = { "objects" : [ self . _dxid ] , "destination" : folder }
    dxpy . api . project_move ( project , request , ** kwargs )
|
def romanized ( locale : str = '' ) -> Callable :
    """Decorator factory that romanizes Cyrillic output of a function.

    Transliterates text from the Cyrillic script into the Latin alphabet.

    .. note:: At this moment it works only for ``ru``, ``uk``, ``kk``.

    :param locale: Locale code.
    :return: Decorator producing Latinized text.
    """
    def decorator ( func ) :
        @ functools . wraps ( func )
        def wrapper ( * args , ** kwargs ) :
            try :
                # Pass ASCII letters, digits and punctuation through as-is.
                table = dict ( ( ch , ch ) for ch in letters + digits + punctuation )
                table . update ( data . ROMANIZATION_DICT [ locale ] )
                # Add common cyrillic letters shared by the locales.
                table . update ( data . COMMON_LETTERS )
            except KeyError :
                raise UnsupportedLocale ( locale )
            produced = func ( * args , ** kwargs )
            # Drop any character without a mapping; map the rest.
            return '' . join ( table [ ch ] for ch in produced if ch in table )
        return wrapper
    return decorator
|
def call_set_attr ( node : Node , key : str , value ) :
    """Delegate the (key, value) assignment to the node's own setter."""
    setter = node . set_attr
    setter ( key , value )
|
def calculate_sun ( self , month , day , hour , is_solar_time = False ) :
    """Get Sun data for an hour of the year.

    Args:
        month: An integer between 1-12
        day: An integer between 1-31
        hour: A positive number between 0..23
        is_solar_time: A boolean to indicate if the input hour is solar time.
            (Default: False)

    Returns:
        A sun object for this particular time
    """
    # Split the fractional hour into whole hour and minute components.
    hour_minute = self . _calculate_hour_and_minute ( hour )
    moment = DateTime ( month , day , * hour_minute , leap_year = self . is_leap_year )
    return self . calculate_sun_from_date_time ( moment , is_solar_time )
|
def maximum_triangle_path ( triangle , rows , cols ) :
    """Compute the maximum top-to-bottom path sum in ``triangle``.

    Folds the triangle bottom-up, accumulating into ``triangle`` in place;
    the apex ends up holding the maximum achievable sum.

    >>> maximum_triangle_path([[1, 0, 0], [4, 8, 0], [1, 5, 3]], 2, 2)
    14
    >>> maximum_triangle_path([[13, 0, 0], [7, 4, 0], [2, 4, 6]], 2, 2)
    24
    >>> maximum_triangle_path([[2, 0, 0], [11, 18, 0], [21, 25, 33]], 2, 2)
    53
    """
    # Walk the rows from the second-to-last up to the apex.
    for level in range ( rows - 1 , - 1 , - 1 ) :
        below = triangle [ level + 1 ]
        current = triangle [ level ]
        # Each cell absorbs the larger of its two children.
        for pos in range ( level + 1 ) :
            left , right = below [ pos ] , below [ pos + 1 ]
            current [ pos ] += left if left >= right else right
    # The apex now stores the maximum path sum.
    return triangle [ 0 ] [ 0 ]
|
def parse_file ( self , file_or_fname ) :
    """Parse a file object or a filename and return the resulting root node."""
    with self . _context ( ) :
        if hasattr ( file_or_fname , 'read' ) :
            # Already an open file-like object; prefer its .name for display.
            fallback = file_or_fname . __class__ . __name__
            self . filename = getattr ( file_or_fname , 'name' , fallback )
            self . p . ParseFile ( file_or_fname )
        else :
            self . filename = file_or_fname
            with open ( file_or_fname , 'rb' ) as handle :
                self . p . ParseFile ( handle )
    return self . _root
|
def clone ( self ) :
    """Return a deep copy of this entity whose state is freshly reset."""
    replica = copy . deepcopy ( self )
    # The copy must not share runtime state with the original.
    replica . state_ = EntityState ( )
    return replica
|
def stp ( br = None , state = 'disable' , iface = None ) :
    '''Sets Spanning Tree Protocol state for a bridge

    CLI Example:

    .. code-block:: bash

        salt '*' bridge.stp br0 enable
        salt '*' bridge.stp br0 disable

    For BSD-like operating systems, it is required to add the interface on
    which to enable the STP.

    CLI Example:

    .. code-block:: bash

        salt '*' bridge.stp bridge0 enable fxp0
        salt '*' bridge.stp bridge0 disable fxp0
    '''
    kernel = __grains__ [ 'kernel' ]
    if kernel == 'Linux' :
        # brctl expects on/off rather than enable/disable.
        return _os_dispatch ( 'stp' , br , { 'enable' : 'on' , 'disable' : 'off' } [ state ] )
    if kernel in SUPPORTED_BSD_LIKE :
        # ifconfig toggles STP via the stp/-stp flags.
        return _os_dispatch ( 'stp' , br , { 'enable' : 'stp' , 'disable' : '-stp' } [ state ] , iface )
    return False
|
def send_headers ( self ) :
    """Write the status line and headers to the client stream.

    Emits the 'headers' event before writing and 'after_headers' once done,
    filling in any default headers in between.
    """
    self . events . sync_emit ( 'headers' )
    self . _set_default_headers ( )
    raw = '' . join ( [ self . status_line , self . EOL , str ( self . headers ) ] )
    self . stream . write ( raw . encode ( ) )
    self . events . sync_emit ( 'after_headers' )
|
async def on_raw_319 ( self , message ) :
    """WHOIS reply: record the channels the queried user is active on."""
    target , nickname , channels = message . params [ : 3 ]
    # Channel names may carry leading prefix/whitespace characters; strip them.
    parsed = set ( )
    for chan in channels . strip ( ) . split ( ' ' ) :
        parsed . add ( chan . lstrip ( ) )
    # Only store the result when a WHOIS for this nick is actually pending.
    if nickname in self . _pending [ 'whois' ] :
        self . _whois_info [ nickname ] . update ( { 'channels' : parsed } )
|
def associated_stream ( self ) :
    """Return the corresponding output or storage stream for an important
    system input.

    Certain system inputs are designated as important and automatically
    copied to output streams without requiring any manual interaction;
    this method returns that corresponding stream.

    Returns:
        DataStream: The corresponding output or storage stream.

    Raises:
        InternalError: If this stream is not marked as an important
            system input.
    """
    if not self . important :
        raise InternalError ( "You may only call autocopied_stream on when DataStream.important is True" , stream = self )
    # Storage-range inputs map to buffered streams; the rest map to outputs.
    in_storage_range = self . stream_id >= DataStream . ImportantSystemStorageStart
    kind = DataStream . BufferedType if in_storage_range else DataStream . OutputType
    return DataStream ( kind , self . stream_id , True )
|
# NOTE(review): extraction artifact (dataset-viewer boilerplate), not part of
# this module; commented out so the file remains syntactically valid.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.