signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def convert_from_gps_time(gps_time, gps_week=None):
    """Convert GPS time in ticks to a standard ``datetime.datetime``.

    :param gps_time: seconds value; seconds-of-week when ``gps_week`` is
        given, otherwise seconds since the GPS epoch.
    :param gps_week: optional GPS week number (image date case).
    :returns: the converted ``datetime.datetime``.
    """
    gps_timestamp = float(gps_time)
    if gps_week is not None:  # image date: week number + seconds-of-week
        return GPS_START + datetime.timedelta(
            seconds=int(gps_week) * SECS_IN_WEEK + gps_timestamp)
    # TAI scale with 1970-01-01 00:00:10 (TAI) epoch.
    # NOTE(review): setting TZ without a time.tzset() call may not take
    # effect in the current process -- confirm the "right/UTC" leap-second
    # table is actually consulted here.
    os.environ['TZ'] = 'right/UTC'
    # by definition
    gps_time_as_gps = GPS_START + datetime.timedelta(seconds=gps_timestamp)
    # constant offset: GPS is 19 seconds behind TAI
    gps_time_as_tai = gps_time_as_gps + datetime.timedelta(seconds=19)
    tai_epoch_as_tai = datetime.datetime(1970, 1, 1, 0, 0, 10)
    # by definition
    tai_timestamp = (gps_time_as_tai - tai_epoch_as_tai).total_seconds()
    # "right" timezone is in effect
    return datetime.datetime.utcfromtimestamp(tai_timestamp)
def create_reward_encoder():
    """Create TF ops to track and increment recent average cumulative reward.

    Returns a tuple ``(last_reward, new_reward, update_reward)``:
    a non-trainable float32 variable, a scalar placeholder, and an assign
    op that copies the placeholder value into the variable.
    """
    reward_var = tf.Variable(
        0, name="last_reward", trainable=False, dtype=tf.float32)
    reward_input = tf.placeholder(shape=[], dtype=tf.float32, name='new_reward')
    assign_op = tf.assign(reward_var, reward_input)
    return reward_var, reward_input, assign_op
def fixup_namespace_packages(path_item, parent=None):
    """Ensure that previously-declared namespace packages include path_item.

    Walks all namespace packages registered under ``parent`` and, for each
    one that has a portion under ``path_item``, recurses so nested namespace
    packages are fixed up as well. The import lock is held for the whole
    traversal so the registry cannot change mid-walk.
    """
    _imp.acquire_lock()
    try:
        for package in _namespace_packages.get(parent, ()):
            # _handle_ns returns the sub-path for this package under
            # path_item, or a falsy value if none exists there.
            subpath = _handle_ns(package, path_item)
            if subpath:
                fixup_namespace_packages(subpath, package)
    finally:
        # Always release, even if _handle_ns raised.
        _imp.release_lock()
def filter(self, media_type, **params):
    """Iterate all the accept media types that match media_type.

    media_type -- string -- the media type to filter by
    **params -- dict -- further filter by key: val

    return -- generator -- yields all matching media type info things
    """
    mtype, msubtype = self._split_media_type(media_type)
    for x in self.__iter__():
        # all the params have to match to make the media type valid
        matched = True
        for k, v in params.items():
            if x[2].get(k, None) != v:
                matched = False
                break
        if matched:
            # x[0] holds the (type, subtype) pair of the accept entry;
            # compare against the requested mtype/msubtype, honoring '*'
            # wildcards on either side.
            if x[0][0] == '*':
                if x[0][1] == '*':
                    yield x
                elif x[0][1] == msubtype:
                    yield x
            elif mtype == '*':
                if msubtype == '*':
                    yield x
                elif x[0][1] == msubtype:
                    yield x
            elif x[0][0] == mtype:
                if msubtype == '*':
                    yield x
                elif x[0][1] == '*':
                    yield x
                elif x[0][1] == msubtype:
                    yield x
def drop_column(self, table, name):
    """Remove a column from an existing table.

    :param table: name of the table to alter
    :param name: name of the column to drop
    :returns: the column name, for convenience/chaining
    """
    # NOTE(review): table/name are interpolated directly into the SQL
    # statement -- confirm callers never pass untrusted identifiers.
    try:
        self.execute('ALTER TABLE {0} DROP COLUMN {1}'.format(wrap(table), name))
        self._printer('\tDropped column {0} from {1}'.format(name, table))
    except ProgrammingError:
        # Column (or key) did not exist; report instead of raising.
        self._printer("\tCan't DROP '{0}'; check that column/key exists in '{1}'".format(name, table))
    return name
def translate_sites(self, indices, vector, frac_coords=True, to_unit_cell=True):
    """Translate specific sites by some vector, keeping the sites within the
    unit cell.

    Args:
        indices: Integer or list of site indices on which to perform the
            translation.
        vector: Translation vector for sites.
        frac_coords (bool): Whether the vector corresponds to fractional or
            cartesian coordinates.
        to_unit_cell (bool): Whether new sites are transformed to the unit
            cell.
    """
    # Allow a bare integer index as well as any iterable of indices.
    if not isinstance(indices, collections.abc.Iterable):
        indices = [indices]
    for site_index in indices:
        site = self._sites[site_index]
        if frac_coords:
            new_fcoords = site.frac_coords + vector
        else:
            # Cartesian vector: convert the shifted position back to
            # fractional coordinates via the lattice.
            new_fcoords = self._lattice.get_fractional_coords(site.coords + vector)
        if to_unit_cell:
            new_fcoords = np.mod(new_fcoords, 1)
        self._sites[site_index].frac_coords = new_fcoords
def check_image_is_4d(img, min_num_volumes=2):
    """Ensure the loaded image is 4D (and nothing else).

    :param img: image object exposing a ``shape`` tuple.
    :param min_num_volumes: minimum required size of the 4th dimension.
    :returns: ``img`` unchanged, for chaining.
    :raises ValueError: if the image is not exactly 4D, any of the first
        three dimensions is empty, or the 4th dimension has fewer than
        ``min_num_volumes`` entries.
    """
    # Guard clauses replace the original if/elif ladder; the original
    # docstring incorrectly said "3d" -- the code enforces 4D.
    ndim = len(img.shape)
    if ndim < 4:
        raise ValueError('Input volume must be 4D!')
    if ndim > 4:
        raise ValueError('Too many dimensions : more than 4.\n'
                         'Invalid shape of image : {}'.format(img.shape))
    for dim_size in img.shape[:3]:
        if dim_size < 1:
            raise ValueError('Atleast one slice must exist in each dimension')
    if img.shape[3] < min_num_volumes:
        raise ValueError('Input volume is 4D '
                         'with less than {} volumes!'.format(min_num_volumes))
    return img
def _format_info(data):
    '''Return user information in a pretty way.

    Builds a plain dict from a pwd-style entry (``pw_*`` attributes),
    resolving the user's group list via ``list_groups``.
    '''
    return {
        'gid': data.pw_gid,
        'groups': list_groups(data.pw_name),
        'home': data.pw_dir,
        'name': data.pw_name,
        'shell': data.pw_shell,
        'uid': data.pw_uid,
        'fullname': data.pw_gecos,
    }
def df(self):
    """Get data usage information.

    Returns:
        (dict): A dictionary representing different resource categories
            and their respective data usage.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.
    """
    endpoint = self._url('/system/df')
    response = self._get(endpoint)
    return self._result(response, True)
def parse_func_kwarg_keys(func, with_vals=False):
    """Hacky inference of kwargs keys from a function's source code.

    SeeAlso:
        argparse_funckw
        recursive_parse_kwargs
        parse_kwarg_keys
        parse_func_kwarg_keys
        get_func_kwargs
    """
    # Strip docstrings and comments first so only executable code is scanned.
    stripped_source = get_func_sourcecode(
        func, strip_docstr=True, strip_comments=True)
    # ut.get_func_kwargs TODO
    return parse_kwarg_keys(stripped_source, with_vals=with_vals)
def add_triangle(self, neighbors, color, center=None, opacity=0.4,
                 draw_edges=False, edges_color=[0.0, 0.0, 0.0],
                 edges_linewidth=2):
    """Adds a triangular surface between three atoms.

    Args:
        neighbors: Atoms between which a triangle will be drawn.
        color: Color for triangle as RGB, or the string 'element' to color
            by the central atom's element.
        center: The "central atom" of the triangle.
        opacity: opacity of the triangle.
        draw_edges: If set to True, a line will be drawn at each edge.
        edges_color: Color of the line for the edges.
        edges_linewidth: Width of the line drawn for the edges.
    """
    # NOTE(review): edges_color=[0.0, 0.0, 0.0] is a mutable default
    # argument shared across calls; it is only read here, but confirm.
    points = vtk.vtkPoints()
    triangle = vtk.vtkTriangle()
    for ii in range(3):
        points.InsertNextPoint(neighbors[ii].x, neighbors[ii].y, neighbors[ii].z)
        triangle.GetPointIds().SetId(ii, ii)
    triangles = vtk.vtkCellArray()
    triangles.InsertNextCell(triangle)
    # polydata object
    trianglePolyData = vtk.vtkPolyData()
    trianglePolyData.SetPoints(points)
    trianglePolyData.SetPolys(triangles)
    # mapper
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInput(trianglePolyData)
    ac = vtk.vtkActor()
    ac.SetMapper(mapper)
    ac.GetProperty().SetOpacity(opacity)
    if color == 'element':
        if center is None:
            raise ValueError('Color should be chosen according to the central atom, '
                             'and central atom is not provided')
        # If partial occupations are involved, the color of the specie with
        # the highest occupation is used
        myoccu = 0.0
        for specie, occu in center.species.items():
            if occu > myoccu:
                myspecie = specie
                myoccu = occu
        # Map 0-255 RGB from the element color table to 0-1 floats.
        color = [i / 255 for i in self.el_color_mapping[myspecie.symbol]]
        ac.GetProperty().SetColor(color)
    else:
        ac.GetProperty().SetColor(color)
    if draw_edges:
        ac.GetProperty().SetEdgeColor(edges_color)
        ac.GetProperty().SetLineWidth(edges_linewidth)
        ac.GetProperty().EdgeVisibilityOn()
    self.ren.AddActor(ac)
def fields(self):
    """Access the fields.

    :returns: twilio.rest.autopilot.v1.assistant.task.field.FieldList
    :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldList
    """
    # Lazily construct the FieldList on first access and cache it.
    if self._fields is None:
        self._fields = FieldList(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            task_sid=self._solution['sid'],
        )
    return self._fields
def present(name, tablespace=None, encoding=None, lc_collate=None,
            lc_ctype=None, owner=None, owner_recurse=False, template=None,
            user=None, maintenance_db=None, db_password=None, db_host=None,
            db_port=None, db_user=None):
    '''Ensure that the named database is present with the specified properties.

    For more information about all of these options see man createdb(1)

    name
        The name of the database to manage

    tablespace
        Default tablespace for the database

    encoding
        The character encoding scheme to be used in this database

    lc_collate
        The LC_COLLATE setting to be used in this database

    lc_ctype
        The LC_CTYPE setting to be used in this database

    owner
        The username of the database owner

    owner_recurse
        Recurse owner change to all relations in the database

    template
        The template database from which to build this database

    user
        System user all operations should be performed on behalf of

    db_user
        database username if different from config or default

    db_password
        user password if any password for a specified user

    db_host
        Database host if different from config or default

    db_port
        Database port if different from config or default

    .. versionadded:: 0.17.0'''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': 'Database {0} is already present'.format(name)}
    db_args = {'maintenance_db': maintenance_db,
               'runas': user,
               'host': db_host,
               'user': db_user,
               'port': db_port,
               'password': db_password, }
    dbs = __salt__['postgres.db_list'](**db_args)
    db_params = dbs.get(name, {})
    # Already present and every requested parameter matches: nothing to do.
    if name in dbs and all((
            db_params.get('Tablespace') == tablespace if tablespace else True,
            (db_params.get('Encoding').lower() == encoding.lower()
             if encoding else True),
            db_params.get('Collate') == lc_collate if lc_collate else True,
            db_params.get('Ctype') == lc_ctype if lc_ctype else True,
            db_params.get('Owner') == owner if owner else True)):
        return ret
    # Present but encoding/collate/ctype differ: these cannot be altered on
    # a live database, so fail.
    # NOTE(review): db_params.get('Encoding').lower() raises AttributeError
    # if the listing lacks an 'Encoding' key -- confirm db_list always
    # provides it when the database exists.
    elif name in dbs and any((
            db_params.get('Encoding').lower() != encoding.lower()
            if encoding else False,
            db_params.get('Collate') != lc_collate if lc_collate else False,
            db_params.get('Ctype') != lc_ctype if lc_ctype else False)):
        ret['comment'] = 'Database {0} has wrong parameters ' \
                         'which couldn\'t be changed on fly.'.format(name)
        ret['result'] = False
        return ret
    # The database is not present, make it!
    if __opts__['test']:
        # Test mode: report what would happen without doing it.
        ret['result'] = None
        if name not in dbs:
            ret['comment'] = 'Database {0} is set to be created'.format(name)
        else:
            ret['comment'] = 'Database {0} exists, but parameters ' \
                             'need to be changed'.format(name)
        return ret
    if (name not in dbs and __salt__['postgres.db_create'](
            name,
            tablespace=tablespace,
            encoding=encoding,
            lc_collate=lc_collate,
            lc_ctype=lc_ctype,
            owner=owner,
            template=template,
            **db_args)):
        ret['comment'] = 'The database {0} has been created'.format(name)
        ret['changes'][name] = 'Present'
    elif (name in dbs and __salt__['postgres.db_alter'](
            name,
            tablespace=tablespace,
            owner=owner,
            owner_recurse=owner_recurse,
            **db_args)):
        ret['comment'] = ('Parameters for database {0} have been changed').format(name)
        ret['changes'][name] = 'Parameters changed'
    elif name in dbs:
        # db_alter returned falsy: alteration failed.
        ret['comment'] = ('Failed to change parameters for database {0}').format(name)
        ret['result'] = False
    else:
        # db_create returned falsy: creation failed.
        ret['comment'] = 'Failed to create database {0}'.format(name)
        ret['result'] = False
    return ret
def geo_field(queryset):
    """Return the GeometryField for a django or spillway GeoQuerySet."""
    model_fields = queryset.model._meta.fields
    for candidate in model_fields:
        if isinstance(candidate, models.GeometryField):
            return candidate
    # No geometry column on this model.
    raise exceptions.FieldDoesNotExist('No GeometryField found')
def get_taskfileinfo(self, refobj):
    """Return the :class:`jukeboxcore.filesys.TaskFileInfo` that is loaded
    by the refobj.

    :param refobj: the refobject to query
    :type refobj: refobj
    :returns: the taskfileinfo that is loaded in the scene
    :rtype: :class:`jukeboxcore.filesys.TaskFileInfo`
    :raises: None
    """
    taskfile = self.get_taskfile(refobj)
    return TaskFileInfo.create_from_taskfile(taskfile)
def get_media_detail_input_rbridge_id(self, **kwargs):
    """Auto Generated Code"""
    # NOTE: the initial "config" element is created and then rebound;
    # the callback ultimately receives the "get_media_detail" element.
    config = ET.Element("config")
    root = ET.Element("get_media_detail")
    config = root
    input_el = ET.SubElement(root, "input")
    rbridge_el = ET.SubElement(input_el, "rbridge-id")
    rbridge_el.text = kwargs.pop('rbridge_id')
    handler = kwargs.pop('callback', self._callback)
    return handler(config)
def _process_plugin(self, plugin):
    '''Logic to handle each plugin that is active

    @param plugin: a plugin dict object with 'instance' and 'regex' keys'''
    instance = plugin['instance']
    regex = plugin['regex']
    for key in self.redis_conn.scan_iter(match=regex):
        # acquire lock
        lock = self._create_lock_object(key)
        try:
            # Non-blocking: skip keys another worker is already handling.
            if lock.acquire(blocking=False):
                val = self.redis_conn.get(key)
                self._process_key_val(instance, key, val)
        except Exception:
            self.logger.error(traceback.format_exc())
            # NOTE(review): if the exception fires before ``val`` is
            # assigned (e.g. inside lock.acquire or redis get), this
            # format call raises UnboundLocalError -- confirm intended.
            self._increment_fail_stat('{k}:{v}'.format(k=key, v=val))
            self._process_failures(key)
        # remove lock regardless of if exception or was handled ok
        if lock._held:
            self.logger.debug("releasing lock")
            lock.release()
def get_arp_input_input_type_static_static(self, **kwargs):
    """Auto Generated Code"""
    # The initial "config" element is created then rebound; the callback
    # receives the "get_arp" element with a nested static/static pair.
    config = ET.Element("config")
    root = ET.Element("get_arp")
    config = root
    input_el = ET.SubElement(root, "input")
    type_el = ET.SubElement(input_el, "input-type")
    static_outer = ET.SubElement(type_el, "static")
    ET.SubElement(static_outer, "static")
    handler = kwargs.pop('callback', self._callback)
    return handler(config)
def csi(self):
    """Gilbert's Score or Threat Score or Critical Success Index:
    a / (a + b + c)."""
    hits = self.table[0, 0]
    false_alarms = self.table[0, 1]
    misses = self.table[1, 0]
    return hits / (hits + false_alarms + misses)
def exitClient(self):
    """Teardown button handler."""
    # Ask the server to tear down the RTSP session.
    self.sendRtspRequest(self.TEARDOWN)
    # self.handler()
    os.remove(CACHE_FILE_NAME + str(self.sessionId) + CACHE_FILE_EXT)
    # Delete the cache image from video
    # Loss rate = fraction of expected frames that were counted.
    rate = float(self.counter / self.frameNbr)
    print('-' * 60 + "\nRTP Packet Loss Rate :" + str(rate) + "\n" + '-' * 60)
    sys.exit(0)
def query_file(self, path, fetchall=False, **params):
    """Like Connection.query, but takes a filename to load a query from.

    Raises IOError when ``path`` is missing or is a directory; otherwise
    reads the file and defers to ``self.query``.
    """
    if not os.path.exists(path):
        raise IOError("File '{}' not found!".format(path))
    if os.path.isdir(path):
        raise IOError("'{}' is a directory!".format(path))
    # Read the .sql file into memory, then defer to self.query.
    with open(path) as handle:
        sql_text = handle.read()
    return self.query(query=sql_text, fetchall=fetchall, **params)
def connection_made(self):
    """Connection to peer handler.

    We send bgp open message to peer and initialize related attributes."""
    # Must only fire from the CONNECT state of the BGP FSM.
    assert self.state == BGP_FSM_CONNECT
    # We have a connection with peer we send open message.
    open_msg = self._peer.create_open_msg()
    self._holdtime = open_msg.hold_time
    self.state = BGP_FSM_OPEN_SENT
    if not self.is_reactive:
        # Active (outgoing) connection: mirror our state onto the peer.
        self._peer.state.bgp_state = self.state
    self.sent_open_msg = open_msg
    self.send(open_msg)
    self._peer.connection_made()
def dlls_in_dir(directory):
    """Return *.dll, *.so, *.dylib files found in the given directory."""
    found = []
    # Same glob order as before: .so, then .dll, then .dylib.
    for pattern in ('*.so', '*.dll', '*.dylib'):
        found.extend(glob.glob(os.path.join(directory, pattern)))
    return found
def _ar_matrix ( self ) :
"""Creates Autoregressive matrix
Returns
X : np . ndarray
Autoregressive Matrix""" | Y = np . array ( self . data [ self . max_lag : self . data . shape [ 0 ] ] )
X = self . data [ ( self . max_lag - 1 ) : - 1 ]
if self . ar != 0 :
for i in range ( 1 , self . ar ) :
X = np . vstack ( ( X , self . data [ ( self . max_lag - i - 1 ) : - i - 1 ] ) )
return X |
def com_adobe_fonts_check_fsselection_matches_macstyle(ttFont):
    """Check if OS/2 fsSelection matches head macStyle bold and italic bits."""
    from fontbakery.constants import FsSelection, MacStyle
    passed = True
    # Bold flag must agree between head.macStyle and OS/2.fsSelection.
    head_bold = bool(ttFont['head'].macStyle & MacStyle.BOLD)
    os2_bold = bool(ttFont['OS/2'].fsSelection & FsSelection.BOLD)
    if head_bold != os2_bold:
        passed = False
        yield FAIL, ("The OS/2.fsSelection and head.macStyle "
                     "bold settings do not match.")
    # Italic flag must agree as well.
    head_italic = bool(ttFont['head'].macStyle & MacStyle.ITALIC)
    os2_italic = bool(ttFont['OS/2'].fsSelection & FsSelection.ITALIC)
    if head_italic != os2_italic:
        passed = False
        yield FAIL, ("The OS/2.fsSelection and head.macStyle "
                     "italic settings do not match.")
    if passed:
        yield PASS, ("The OS/2.fsSelection and head.macStyle "
                     "bold and italic settings match.")
def _get_match(self, prefix):
    """Return the key that maps to this prefix."""
    # (hard coded) If we match a CPR response, return Keys.CPRResponse.
    # (This one doesn't fit in the ANSI_SEQUENCES, because it contains
    # integer variables.)
    if _cpr_response_re.match(prefix):
        return Keys.CPRResponse
    if _mouse_event_re.match(prefix):
        return Keys.Vt100MouseEvent
    # Otherwise fall back to the static mapping table; None when absent
    # (equivalent to the original try/except KeyError).
    return ANSI_SEQUENCES.get(prefix)
def _load_api(self):
    """Add the routes for the scheduler API."""
    # (endpoint name, rule, view function, HTTP method)
    routes = (
        ('get_scheduler_info', '', api.get_scheduler_info, 'GET'),
        ('add_job', '/jobs', api.add_job, 'POST'),
        ('get_job', '/jobs/<job_id>', api.get_job, 'GET'),
        ('get_jobs', '/jobs', api.get_jobs, 'GET'),
        ('delete_job', '/jobs/<job_id>', api.delete_job, 'DELETE'),
        ('update_job', '/jobs/<job_id>', api.update_job, 'PATCH'),
        ('pause_job', '/jobs/<job_id>/pause', api.pause_job, 'POST'),
        ('resume_job', '/jobs/<job_id>/resume', api.resume_job, 'POST'),
        ('run_job', '/jobs/<job_id>/run', api.run_job, 'POST'),
    )
    for endpoint, rule, view_func, method in routes:
        self._add_url_route(endpoint, rule, view_func, method)
def list_files(dir_pathname, recursive=True, topdown=True, followlinks=False):
    """Enlist all the files, using their absolute paths, within the specified
    directory, optionally recursively.

    :param dir_pathname:
        The directory to traverse.
    :param recursive:
        ``True`` for walking recursively through the directory tree;
        ``False`` otherwise.
    :param topdown:
        Please see the documentation for :func:`os.walk`
    :param followlinks:
        Please see the documentation for :func:`os.walk`
    """
    tree = walk(dir_pathname, recursive, topdown, followlinks)
    for root, _dirnames, filenames in tree:
        for name in filenames:
            yield absolute_path(os.path.join(root, name))
def get_rewritten_query(self):
    """Return the rewritten query, or None if there is none."""
    query = self._extract(_PartitionedQueryExecutionInfo.RewrittenQueryPath)
    if query is not None:
        # Hardcode formattable filter to true for now.
        query = query.replace(
            '{documentdb-formattableorderbyquery-filter}', 'true')
    return query
def closed_issues(issues, after):
    """Yield closed issues (closed after a given datetime) from *issues*.

    Duplicate titles are suppressed: only the first issue carrying a given
    title is yielded.
    """
    logging.info('finding closed issues after {}...'.format(after))
    yielded_titles = set()
    for candidate in issues:
        if closed_issue(candidate, after) and candidate['title'] not in yielded_titles:
            yielded_titles.add(candidate['title'])
            yield candidate
def draw(self, cov, num_reals=1, names=None):
    """draw random realizations from a multivariate
    Gaussian distribution

    Parameters
    ----------
    cov : pyemu.Cov
        covariance structure to draw from
    num_reals : int
        number of realizations to generate
    names : list
        list of column names to draw for. If None, all names are drawn.
    """
    real_names = np.arange(num_reals, dtype=np.int64)
    # make sure everything is cool WRT ordering
    if names is not None:
        vals = self.mean_values.loc[names]
        cov = cov.get(names)
    elif self.names != cov.row_names:
        names = get_common_elements(self.names, cov.row_names)
        vals = self.mean_values.loc[names]
        cov = cov.get(names)
    else:
        vals = self.mean_values
        names = self.names
    # generate random numbers
    if cov.isdiagonal:  # much faster
        val_array = np.array(
            [np.random.normal(mu, std, size=num_reals)
             for mu, std in zip(vals, np.sqrt(cov.x))]).transpose()
    else:
        val_array = np.random.multivariate_normal(vals, cov.as_2d, num_reals)
    # BUG FIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical
    # (and value-identical) spelling.
    self.loc[:, :] = np.nan
    self.dropna(inplace=True)
    # this sucks - can only set by enlargement one row at a time
    # (loop variable renamed so it no longer shadows the outer ``vals``)
    for rname, real_vals in zip(real_names, val_array):
        self.loc[rname, names] = real_vals
        # set NaNs to mean_values
        idx = pd.isnull(self.loc[rname, :])
        self.loc[rname, idx] = self.mean_values[idx]
def run(self):
    """Called by Sphinx.

    :returns: ImgurEmbedNode and ImgurJavaScriptNode instances with config
        values passed as arguments.
    :rtype: list
    """
    # Get Imgur ID.
    imgur_id = self.arguments[0]
    if not RE_IMGUR_ID.match(imgur_id):
        raise ImgurError('Invalid Imgur ID specified. Must be 5-10 letters and numbers. Albums prefixed with "a/".')
    # Validate directive options.
    if imgur_id.startswith('a/') and self.options.get('target_largest', None):
        raise ImgurError('Imgur albums (whose covers are displayed) do not support :target_largest: option.')
    # Modify options: bare numeric sizes get a 'px' unit suffix.
    if self.options.get('width', '').isdigit():
        self.options['width'] += 'px'
    if self.options.get('height', '').isdigit():
        self.options['height'] += 'px'
    # Read from conf.py. Unset gallery/largest/page targets if :target: is set.
    if self.options.get('target', None):
        self.options.pop('target_gallery', None)
        self.options.pop('target_largest', None)
        self.options.pop('target_page', None)
    elif not any(self.options.get('target_' + i, None) for i in ('gallery', 'largest', 'page')):
        # No explicit target options at all: fall back to conf.py defaults.
        config = self.state.document.settings.env.config
        self.options.setdefault('target_gallery', config.imgur_target_default_gallery)
        self.options.setdefault('target_largest', config.imgur_target_default_largest)
        self.options.setdefault('target_page', config.imgur_target_default_page)
    return [ImgurImageNode(imgur_id, self.options)]
def server_from_config(config=None, server_class=None, additional_kwargs=None):
    """Gets a configured L{coilmq.server.StompServer} from specified config.

    If `config` is None, global L{coilmq.config.config} var will be used instead.

    The `server_class` and `additional_kwargs` are primarily hooks for using this method
    from a testing environment.

    @param config: A C{ConfigParser.ConfigParser} instance with loaded config values.
    @type config: C{ConfigParser.ConfigParser}

    @param server_class: Which class to use for the server. (This doesn't come from config currently.)
    @type server_class: C{class}

    @param additional_kwargs: Any additional args that should be passed to class.
    @type additional_kwargs: C{list}

    @return: The configured StompServer.
    @rtype: L{coilmq.server.StompServer}
    """
    # NOTE(review): server_class and additional_kwargs are accepted but
    # never used below -- the server is always a ThreadedStompServer.
    global global_config
    if not config:
        config = global_config
    # Resolve factory callables from dotted names given in the config.
    queue_store_factory = resolve_name(config.get('coilmq', 'qstore.factory'))
    subscriber_scheduler_factory = resolve_name(
        config.get('coilmq', 'scheduler.subscriber_priority_factory'))
    queue_scheduler_factory = resolve_name(
        config.get('coilmq', 'scheduler.queue_priority_factory'))
    # Authentication is optional: only built when configured.
    if config.has_option('coilmq', 'auth.factory'):
        authenticator_factory = resolve_name(config.get('coilmq', 'auth.factory'))
        authenticator = authenticator_factory()
    else:
        authenticator = None
    server = ThreadedStompServer(
        (config.get('coilmq', 'listen_addr'), config.getint('coilmq', 'listen_port')),
        queue_manager=QueueManager(
            store=queue_store_factory(),
            subscriber_scheduler=subscriber_scheduler_factory(),
            queue_scheduler=queue_scheduler_factory()),
        topic_manager=TopicManager(),
        authenticator=authenticator,
        protocol=STOMP11)
    logger.info("Created server:%r" % server)
    return server
def save_sql_to_files(overwrite=False):
    """Executes every .sql file in /data/scripts/ using salic db vpn and
    then saves pickle files into /data/raw/.

    Existing output files are skipped unless ``overwrite`` is True.
    """
    suffix_len = len(SQL_EXTENSION)
    scripts_dir = DATA_PATH / 'scripts'
    save_dir = DATA_PATH / "raw"
    for entry in os.listdir(scripts_dir):
        if not entry.endswith(SQL_EXTENSION):
            continue
        target = os.path.join(save_dir, entry[:-suffix_len] + '.' + FILE_EXTENSION)
        if overwrite or not os.path.isfile(target):
            query_result = make_query(scripts_dir / entry)
            save_dataframe_as_pickle(query_result, target)
        else:
            print(("file {} already exists, if you would like to update"
                   " it, use -f flag\n").format(target))
def WriteEventMACBGroup(self, event_macb_group):
    """Writes an event MACB group to the output.

    Args:
        event_macb_group (list[EventObject]): event MACB group.
    """
    # Base the output row on the first event of the group.
    output_values = self._GetOutputValues(event_macb_group[0])
    timestamp_descriptions = [event.timestamp_desc for event in event_macb_group]
    # Index 3: combined MACB representation derived from all descriptions.
    output_values[3] = (
        self._output_mediator.GetMACBRepresentationFromDescriptions(
            timestamp_descriptions))
    # TODO: fix timestamp description in source.
    output_values[6] = '; '.join(timestamp_descriptions)
    self._WriteOutputValues(output_values)
def cpuinfo():
    """Get CPU info from /proc/cpuinfo.

    Returns a dict with keys "CPU" (model name string) and "MHz" (clock
    speed rounded to the nearest integer), when those fields are present.
    """
    hwinfo = {}
    # 'with' guarantees the file is closed even if parsing raises
    # (the original leaked the handle on any exception).
    with open("/proc/cpuinfo") as f:
        for line in f:
            parts = line.split(":")
            name = parts[0].strip()
            if len(parts) > 1:
                val = parts[1].strip()
                if name == "model name":
                    hwinfo["CPU"] = val
                elif name == "cpu MHz":
                    hwinfo["MHz"] = int(round(float(val)))
    return hwinfo
def patch(*args, **kwargs):
    '''Add patches to a plot. The color of the patches is indexed according
    to a specified color-index.

    :example:

        Plot a finite element mesh: the outline of the undeformed
        configuration, and the deformed configuration for which the
        elements get a color e.g. based on stress::

            import matplotlib.pyplot as plt
            import goosempl as gplt

            fig, ax = plt.subplots()
            p = gplt.patch(coor=coor + disp, conn=conn, axis=ax,
                           cindex=stress, cmap='YlOrRd', edgecolor=None)
            _ = gplt.patch(coor=coor, conn=conn, axis=ax)
            cbar = fig.colorbar(p, axis=ax, aspect=10)
            plt.show()

    :arguments:

        **coor** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the coordinates (positions) of each node.

        **conn** (``<numpy.ndarray>`` | ``<list>`` (nested))
            Matrix with on each row the node numbers (rows in ``coor``)
            which form an element (patch).

    :options:

        **cindex** (``<numpy.ndarray>``)
            Array with, for each patch, the value that should be indexed
            to a color.

        **axis** (``<matplotlib>``)
            Specify an axis to include to plot in. By default the current
            axis is used.

        **autoscale** ([``True``] | ``False``)
            Automatically update the limits of the plot (currently
            automatic limits of Collections are not supported by
            matplotlib).

    :recommended options:

        **cmap**, **linewidth**, **edgecolor**, **clim** -- forwarded to
        ``matplotlib.collections.PatchCollection``.

    :returns:

        **handle** (``<matplotlib>``)
            Handle of the patch objects.

    .. seealso::

        * `matplotlib patch_collection example
          <http://matplotlib.org/examples/api/patch_collection.html>`_.
    '''
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon
    # check dependent options
    # NOTE(review): a "patches-only" calling style is implied by *args, but
    # this implementation always requires "coor" and "conn".
    if 'coor' not in kwargs or 'conn' not in kwargs:
        raise IOError('Specify both "coor" and "conn"')
    # extract local options
    axis = kwargs.pop('axis', plt.gca())
    cindex = kwargs.pop('cindex', None)
    coor = kwargs.pop('coor', None)
    conn = kwargs.pop('conn', None)
    autoscale = kwargs.pop('autoscale', True)
    # set defaults
    kwargs.setdefault('edgecolor', 'k')
    # no color-index -> set transparent
    if cindex is None:
        kwargs.setdefault('facecolor', (0., 0., 0., 0.))
    # convert mesh -> list of Polygons
    if coor is not None and conn is not None:
        poly = []
        for iconn in conn:
            poly.append(Polygon(coor[iconn, :]))
        # BUG FIX: the original ``tuple(poly, *args)`` raised TypeError
        # whenever extra positional patches were passed (tuple() accepts a
        # single argument); prepend the generated polygons instead.
        args = tuple(poly) + tuple(args)
    # convert patches -> matplotlib-objects
    p = PatchCollection(args, **kwargs)
    # add colors to patches
    if cindex is not None:
        p.set_array(cindex)
    # add patches to axis
    axis.add_collection(p)
    # rescale the axes manually
    if autoscale:
        # - get limits
        xlim = [np.min(coor[:, 0]), np.max(coor[:, 0])]
        ylim = [np.min(coor[:, 1]), np.max(coor[:, 1])]
        # - set limits +/- 10% extra margin
        axis.set_xlim([xlim[0] - .1 * (xlim[1] - xlim[0]),
                       xlim[1] + .1 * (xlim[1] - xlim[0])])
        axis.set_ylim([ylim[0] - .1 * (ylim[1] - ylim[0]),
                       ylim[1] + .1 * (ylim[1] - ylim[0])])
    return p
def make(parser):
    """Ceph MON Daemon management"""
    parser.formatter_class = ToggleRawTextHelpFormatter
    # Every invocation must select one of the subcommands registered below.
    mon_parser = parser.add_subparsers(dest='subcommand')
    mon_parser.required = True
    # 'add': attach a monitor to an existing cluster.
    mon_add = mon_parser.add_parser(
        'add',
        help=('R|Add a monitor to an existing cluster:\n'
              '\tceph-deploy mon add node1\n'
              'Or:\n'
              '\tceph-deploy mon add --address 192.168.1.10 node1\n'
              'If the section for the monitor exists and defines a `mon addr` that\n'
              'will be used, otherwise it will fallback by resolving the hostname to an\n'
              'IP. If `--address` is used it will override all other options.'))
    mon_add.add_argument('--address', nargs='?', )
    mon_add.add_argument('mon', nargs=1, )
    # 'create': deploy monitors on the given hosts.
    mon_create = mon_parser.add_parser(
        'create',
        help=('R|Deploy monitors by specifying them like:\n'
              '\tceph-deploy mon create node1 node2 node3\n'
              'If no hosts are passed it will default to use the\n'
              '`mon initial members` defined in the configuration.'))
    mon_create.add_argument('--keyrings', nargs='?',
                            help='concatenate multiple keyrings to be seeded on new monitors', )
    mon_create.add_argument('mon', nargs='*', )
    # 'create-initial': deploy `mon initial members`, wait for quorum.
    mon_create_initial = mon_parser.add_parser(
        'create-initial',
        help=('Will deploy for monitors defined in `mon initial members`, '
              'wait until they form quorum and then gatherkeys, reporting '
              'the monitor status along the process. If monitors don\'t form '
              'quorum the command will eventually time out.'))
    mon_create_initial.add_argument('--keyrings', nargs='?',
                                    help='concatenate multiple keyrings to be seeded on new monitors', )
    # 'destroy': remove monitors from remote hosts.
    mon_destroy = mon_parser.add_parser(
        'destroy', help='Completely remove Ceph MON from remote host(s)')
    mon_destroy.add_argument('mon', nargs='+', )
    # All subcommands dispatch to the `mon` function.
    parser.set_defaults(func=mon, )
def combine_kwargs(**kwargs):
    """Flatten a series of keyword arguments from complex combinations of
    dictionaries and lists into a list of tuples representing
    properly-formatted parameters to pass to the Requester object.

    :param kwargs: A dictionary containing keyword arguments to be
        flattened into properly-formatted parameters.
    :type kwargs: dict
    :returns: A list of tuples that represent flattened kwargs. The
        first element is a string representing the key. The second
        element is the value.
    :rtype: `list` of `tuple`
    """
    flattened = []
    for key, value in kwargs.items():
        if isinstance(value, dict):
            # Nested dicts expand into key[subkey]... pairs via flatten_kwarg.
            for sub_key, sub_value in value.items():
                flattened.extend(
                    ('{}{}'.format(key, suffix), leaf)
                    for suffix, leaf in flatten_kwarg(sub_key, sub_value)
                )
        elif is_multivalued(value):
            # List-like values expand into one entry per element.
            for element in value:
                flattened.extend(
                    ('{}{}'.format(key, suffix), leaf)
                    for suffix, leaf in flatten_kwarg('', element)
                )
        else:
            # Scalars pass straight through with a text key.
            flattened.append((text_type(key), value))
    return flattened
def dphi_fc(fdata):
    """Apply phi derivative in the Fourier domain.

    Multiplies each Fourier coefficient by ``1j * k`` where ``k`` is the
    signed frequency index of its column: ``0..B-1`` followed by ``-B..-1``
    for ``ncols == 2*B``.  ``fdata`` is modified in place; nothing is
    returned.

    Args:
        fdata: 2-D complex array of Fourier coefficients.  As always, we
            assume nrows and ncols are even.
    """
    ncols = fdata.shape[1]
    half = ncols // 2
    # Signed frequency indices in FFT order: 0..half-1, then -half..-1.
    freqs = np.concatenate((np.arange(0, half), np.arange(-half, 0)))
    # Broadcasting applies the same per-column factor to every row.  This
    # replaces the former per-row Python loop, which used the Python-2-only
    # ``xrange`` and therefore failed on Python 3.
    fdata[:, :] = 1j * freqs[np.newaxis, :] * fdata
def custom_observable_properties_prefix_strict(instance):
    """Ensure observable object custom properties follow strict naming style
    conventions.

    Yields a JSONError for every custom property -- top-level, embedded, or
    inside an extension -- that is neither a spec-defined property for the
    object/extension type nor a match for CUSTOM_PROPERTY_PREFIX_RE (the
    strict ``x_<source>-<name>`` pattern).

    :param instance: STIX observable-data instance (dict); assumes an
        ``objects`` mapping is present -- TODO confirm against callers.
    """
    for key, obj in instance['objects'].items():
        # Objects without a declared type cannot be checked against the enums.
        if 'type' not in obj:
            continue
        type_ = obj['type']
        for prop in obj:  # Check objects' properties
            # 1) Top-level property: must be spec-defined for this type or a
            # well-formed custom property.
            if (type_ in enums.OBSERVABLE_PROPERTIES and
                    prop not in enums.OBSERVABLE_PROPERTIES[type_] and
                    not CUSTOM_PROPERTY_PREFIX_RE.match(prop)):
                yield JSONError("Cyber Observable Object custom property '%s' "
                                "should start with 'x_' followed by a source "
                                "unique identifier (like a domain name with "
                                "dots replaced by hyphens), a hyphen and then the"
                                " name." % prop, instance['id'],
                                'custom-prefix')
            # 2) Check properties of embedded cyber observable types
            if (type_ in enums.OBSERVABLE_EMBEDDED_PROPERTIES and
                    prop in enums.OBSERVABLE_EMBEDDED_PROPERTIES[type_]):
                for embed_prop in obj[prop]:
                    # Embedded entries may be dicts (check each key) or plain
                    # property names (check directly).
                    if isinstance(embed_prop, dict):
                        for embedded in embed_prop:
                            if (embedded not in enums.OBSERVABLE_EMBEDDED_PROPERTIES[type_][prop] and
                                    not CUSTOM_PROPERTY_PREFIX_RE.match(embedded)):
                                yield JSONError("Cyber Observable Object custom "
                                                "property '%s' in the %s property of "
                                                "%s object should start with 'x_' "
                                                "followed by a source unique "
                                                "identifier (like a domain name with "
                                                "dots replaced by hyphens), a hyphen and "
                                                "then the name." % (embedded, prop, type_),
                                                instance['id'], 'custom-prefix')
                    elif (embed_prop not in enums.OBSERVABLE_EMBEDDED_PROPERTIES[type_][prop] and
                            not CUSTOM_PROPERTY_PREFIX_RE.match(embed_prop)):
                        yield JSONError("Cyber Observable Object custom "
                                        "property '%s' in the %s property of "
                                        "%s object should start with 'x_' "
                                        "followed by a source unique "
                                        "identifier (like a domain name with "
                                        "dots replaced by hyphens), a hyphen and "
                                        "then the name." % (embed_prop, prop, type_),
                                        instance['id'], 'custom-prefix')
        # 3) Check object extensions' properties
        if (type_ in enums.OBSERVABLE_EXTENSIONS and 'extensions' in obj):
            for ext_key in obj['extensions']:
                if ext_key in enums.OBSERVABLE_EXTENSIONS[type_]:
                    for ext_prop in obj['extensions'][ext_key]:
                        if (ext_prop not in enums.OBSERVABLE_EXTENSION_PROPERTIES[ext_key] and
                                not CUSTOM_PROPERTY_PREFIX_RE.match(ext_prop)):
                            yield JSONError("Cyber Observable Object custom "
                                            "property '%s' in the %s extension "
                                            "should start with 'x_' followed by a "
                                            "source unique identifier (like a "
                                            "domain name with dots replaced by "
                                            "hyphens), a hyphen and then the name."
                                            % (ext_prop, ext_key), instance['id'],
                                            'custom-prefix')
                # NOTE: this condition repeats the one above; both loops walk
                # the same extension properties but check different enums.
                if ext_key in enums.OBSERVABLE_EXTENSIONS[type_]:
                    for ext_prop in obj['extensions'][ext_key]:
                        # 4) Embedded properties inside extension properties.
                        if (ext_key in enums.OBSERVABLE_EXTENSION_EMBEDDED_PROPERTIES and
                                ext_prop in enums.OBSERVABLE_EXTENSION_EMBEDDED_PROPERTIES[ext_key]):
                            for embed_prop in obj['extensions'][ext_key][ext_prop]:
                                # Normalize scalar values to one-element lists so
                                # both shapes share the check below.
                                if not (isinstance(embed_prop, Iterable) and not isinstance(embed_prop, string_types)):
                                    embed_prop = [embed_prop]
                                for p in embed_prop:
                                    if (p not in enums.OBSERVABLE_EXTENSION_EMBEDDED_PROPERTIES[ext_key][ext_prop] and
                                            not CUSTOM_PROPERTY_PREFIX_RE.match(p)):
                                        yield JSONError("Cyber Observable Object "
                                                        "custom property '%s' in the %s "
                                                        "property of the %s extension should "
                                                        "start with 'x_' followed by a source "
                                                        "unique identifier (like a domain name"
                                                        " with dots replaced by hyphens), a "
                                                        "hyphen and then the name."
                                                        % (p, ext_prop, ext_key), instance['id'],
                                                        'custom-prefix')
def _sign_string(message, private_key_file=None, private_key_string=None):
    """Signs a string for use with Amazon CloudFront.  Requires the M2Crypto
    library be installed.

    :param message: The policy/string to sign.
    :param private_key_file: Path to a PEM key file, or an open file-like
        object containing the key.
    :param private_key_string: The PEM key contents as a string.
    :returns: The raw SHA1/RSA signature bytes.
    :raises NotImplementedError: If M2Crypto is not installed.
    :raises ValueError: If both or neither of the key arguments are given.
    """
    try:
        from M2Crypto import EVP
    except ImportError:
        raise NotImplementedError("Boto depends on the python M2Crypto "
                                  "library to generate signed URLs for "
                                  "CloudFront")
    # Make sure only one of private_key_file and private_key_string is set
    if private_key_file and private_key_string:
        raise ValueError("Only specify the private_key_file or the private_key_string not both")
    if not private_key_file and not private_key_string:
        raise ValueError("You must specify one of private_key_file or private_key_string")
    # If private_key_file is a file-like object, read the key string from it.
    # BUG FIX: the original used ``isinstance(private_key_file, file)``; the
    # ``file`` builtin does not exist on Python 3, so duck-type on ``read``.
    if hasattr(private_key_file, 'read'):
        private_key_string = private_key_file.read()
    # Now load key and calculate signature
    if private_key_string:
        key = EVP.load_key_string(private_key_string)
    else:
        key = EVP.load_key(private_key_file)
    key.reset_context(md='sha1')
    key.sign_init()
    key.sign_update(str(message))
    signature = key.sign_final()
    return signature
def to_masked_array(self, copy=True):
    """Convert this array into a numpy.ma.MaskedArray

    Parameters
    ----------
    copy : bool
        If True (default) make a copy of the array in the result. If False,
        a MaskedArray view of DataArray.values is returned.

    Returns
    -------
    result : MaskedArray
        Masked where invalid values (nan or inf) occur.
    """
    values = self.values
    # pd.isnull flags NaN/NaT entries, which become the mask.
    return np.ma.MaskedArray(data=values, mask=pd.isnull(values), copy=copy)
def parseSearchTerm(term):
    """Turn a string search query into a two-tuple of a search term and a
    dictionary of search keywords.

    Words of the form ``key:value`` become keyword entries; ``key:`` or
    ``:value`` contribute their non-empty half as a plain term; everything
    else is a plain term.  The keyword dict is None when no keywords exist.
    """
    plain_words = []
    keywords = {}
    for word in term.split():
        # Only words with exactly one colon are keyword candidates.
        if word.count(':') != 1:
            plain_words.append(word)
            continue
        key, value = word.split(u':')
        if key and value:
            keywords[key] = value
        elif key or value:
            # Half-formed keyword: keep the non-empty side as a plain term.
            plain_words.append(key or value)
        # A lone ':' is dropped entirely.
    term = u' '.join(plain_words)
    return (term, keywords) if keywords else (term, None)
def shuffle(seq, random=None):
    r"""Return shuffled *copy* of `seq`.

    :param seq: A list, string, or other sequence type; the result has the
        same type as the input.
    :param random: Optional random source, forwarded to ``ipshuffle``.
    """
    if isinstance(seq, list):
        return ipshuffle(seq[:], random)
    elif isString(seq):  # seq[0:0] == "" or u"" (empty str of matching type)
        # BUG FIX: ``random`` was previously passed as a second positional
        # argument to ``str.join`` (always a TypeError) instead of being
        # forwarded to ``ipshuffle`` like in the other branches.
        return seq[0:0].join(ipshuffle(list(seq), random))
    else:
        return type(seq)(ipshuffle(list(seq), random))
def reset_stats_history(self):
    """Reset the stats history (dict of GlancesAttribute).

    No-op when history collection is disabled for this plugin.
    """
    if self.history_enable():
        # Item names are gathered only for the debug log message below.
        reset_list = [a['name'] for a in self.get_items_history_list()]
        logger.debug("Reset history for plugin {} (items: {})".format(self.plugin_name, reset_list))
        self.stats_history.reset()
def popitem(self):
    """Remove and return an item.

    Delegates eviction to the parent cache and unpacks the stored value so
    callers receive a plain ``(key, value)`` pair.
    """
    key, value = super(LFUCache, self).popitem()
    # The stored value appears to be a pair whose second element is the
    # actual cached value -- NOTE(review): inferred from the [1] index;
    # confirm against the cache's __setitem__.
    return (key, value[1])
def render_python_template_to(src, dest, subsd, only_update=False, prev_subsd=None, create_dest_dirs=True, logger=None):
    """Overload this function if you want to use a template engine such as
    e.g. mako.

    Renders *src* with ``%``-style substitution from *subsd* into *dest*.
    With ``only_update``, rendering is skipped when the substitution dict is
    unchanged and *dest* is already newer than *src*.
    """
    # Skip re-rendering when nothing relevant changed.
    if (only_update and subsd == prev_subsd
            and not missing_or_other_newer(dest, src)):
        if logger:
            msg = ("Did not re-render {}. "
                   "(destination newer + same dict)")
            logger.info(msg.format(src))
        return
    with open(src, 'rt') as template_fh:
        template = template_fh.read()
    # Don't go crazy on file size...
    if create_dest_dirs:
        parent = os.path.dirname(dest)
        if not os.path.exists(parent):
            make_dirs(parent)
    with open(dest, 'wt') as out_fh:
        out_fh.write(template % subsd)
def clip(layer_to_clip, mask_layer):
    """Clip a vector layer with another.

    Issue https://github.com/inasafe/inasafe/issues/3186

    :param layer_to_clip: The vector layer to clip.
    :type layer_to_clip: QgsVectorLayer

    :param mask_layer: The vector layer to use for clipping.
    :type mask_layer: QgsVectorLayer

    :return: The clip vector layer.
    :rtype: QgsVectorLayer

    .. versionadded:: 4.0
    """
    # The output name is templated on the input layer's purpose keyword.
    output_layer_name = clip_steps['output_layer_name']
    output_layer_name = output_layer_name % (layer_to_clip.keywords['layer_purpose'])
    # Run the native QGIS clip algorithm into an in-memory layer.
    parameters = {'INPUT': layer_to_clip, 'OVERLAY': mask_layer, 'OUTPUT': 'memory:'}
    # TODO implement callback through QgsProcessingFeedback object
    initialize_processing()
    feedback = create_processing_feedback()
    context = create_processing_context(feedback=feedback)
    result = processing.run('native:clip', parameters, context=context)
    if result is None:
        # processing.run returned nothing: treat as a broken installation.
        raise ProcessingInstallationError
    clipped = result['OUTPUT']
    clipped.setName(output_layer_name)
    # Carry the InaSAFE keyword metadata over to the clipped layer.
    clipped.keywords = layer_to_clip.keywords.copy()
    clipped.keywords['title'] = output_layer_name
    check_layer(clipped)
    return clipped
def translate ( self , vector , inc_alt_states = True ) :
"""Translates every atom in the AMPAL object .
Parameters
vector : 3D Vector ( tuple , list , numpy . array )
Vector used for translation .
inc _ alt _ states : bool , optional
If true , will rotate atoms in all states i . e . includes
alternate conformations for sidechains .""" | vector = numpy . array ( vector )
for atom in self . get_atoms ( inc_alt_states = inc_alt_states ) :
atom . _vector += vector
return |
def get_ordered_tokens_from_vocab(vocab: Vocab) -> List[str]:
    """Returns the list of tokens in a vocabulary, ordered by increasing vocabulary id.

    :param vocab: Input vocabulary.
    :return: List of tokens.
    """
    # Sort (token, id) pairs by id, then drop the ids.
    entries_by_id = sorted(vocab.items(), key=lambda entry: entry[1])
    return [token for token, _ in entries_by_id]
def save_account(message, collection=DATABASE.account):
    """save account

    Upserts an account document keyed by the (account_cookie, user_cookie,
    portfolio_cookie) triple.

    Arguments:
        message {[type]} -- [description]

    Keyword Arguments:
        collection {[type]} -- [description] (default: {DATABASE})
    """
    try:
        collection.create_index(
            [("account_cookie", ASCENDING),
             ("user_cookie", ASCENDING),
             ("portfolio_cookie", ASCENDING)],
            unique=True)
    except Exception:
        # Index creation is best-effort (e.g. the index may already exist).
        # BUG FIX: the bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception so those still propagate.
        pass
    # Upsert: update the matching document or insert it if absent.
    collection.update(
        {'account_cookie': message['account_cookie'],
         'portfolio_cookie': message['portfolio_cookie'],
         'user_cookie': message['user_cookie']},
        {'$set': message},
        upsert=True)
def canonicalize(parsed_op):
    """Get the "canonical form" of this operation, putting it into a form where it can be serialized
    to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases.

    For NAME_TRANSFER, this means:
    * add 'keep_data' flag
    """
    assert 'op' in parsed_op
    assert len(parsed_op['op']) == 2
    # The second character of the op string encodes the transfer mode.
    mode = parsed_op['op'][1]
    if mode == TRANSFER_KEEP_DATA:
        parsed_op['keep_data'] = True
    elif mode == TRANSFER_REMOVE_DATA:
        parsed_op['keep_data'] = False
    else:
        raise ValueError("Invalid op '{}'".format(parsed_op['op']))
    return parsed_op
def start(st_reg_number):
    """Checks the number validity for the Sergipe state registration.

    The number must be exactly 9 digits; the last digit is a modulus-11
    check digit over the first 8 digits with weights 9 down to 2.
    """
    MODULUS = 11
    # Sergipe registrations are exactly nine digits long.
    if len(st_reg_number) != 9:
        return False
    total = 0
    weight = 9
    for position in range(len(st_reg_number) - 1):
        total += int(st_reg_number[position]) * weight
        weight -= 1
    check_digit = MODULUS - (total % MODULUS)
    # Results of 10 or 11 normalize to a zero check digit.
    if check_digit in (10, 11):
        check_digit = 0
    return check_digit == int(st_reg_number[-1])
def replacing_symlink(source, link_name):
    """Create symlink that overwrites any existing target.

    The link is first created under a temporary name and then moved over
    ``link_name`` by ``replace_file_or_dir``, replacing whatever was there.
    """
    with make_tmp_name(link_name) as tmp_link_name:
        os.symlink(source, tmp_link_name)
        replace_file_or_dir(link_name, tmp_link_name)
def useragent(self, value):
    """gets/sets the user agent value

    A value of None restores the default desktop Firefox UA string;
    otherwise the stored value is replaced only when it actually differs.
    """
    if value is None:
        self._useragent = "Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0"
        return
    if self._useragent != value:
        self._useragent = value
def create_role(self, role_name, role_type, host_id):
    """Create a role.

    @param role_name: Role name
    @param role_type: Role type
    @param host_id: ID of the host to assign the role to
    @return: An ApiRole object
    """
    # Thin wrapper: delegates to the roles module with this service's
    # resource root, name, and cluster context.
    return roles.create_role(self._get_resource_root(), self.name, role_type, role_name, host_id, self._get_cluster_name())
def _xml_to_dict ( xml ) :
'''Helper function to covert xml into a data dictionary .
xml
The xml data to convert .''' | dicts = { }
for item in xml :
key = item . tag . lower ( )
idx = 1
while key in dicts :
key += six . text_type ( idx )
idx += 1
if item . text is None :
dicts [ key ] = _xml_to_dict ( item )
else :
dicts [ key ] = item . text
return dicts |
def rts_smooth(kalman_filter, state_count=None):
    """Compute the Rauch-Tung-Striebel smoothed state estimates and estimate
    covariances for a Kalman filter.

    Args:
        kalman_filter (KalmanFilter): Filter whose smoothed states should be
            returned
        state_count (int or None): Number of smoothed states to return.
            If None, use ``kalman_filter.state_count``.

    Returns:
        (list of MultivariateNormal): List of multivariate normal distributions.
        The mean of the distribution is the estimated state and the covariance
        is the covariance of the estimate.

    Raises:
        ValueError: If ``state_count`` is negative or exceeds the filter's
            recorded state count.
    """
    if state_count is None:
        state_count = kalman_filter.state_count
    state_count = int(state_count)
    if state_count < 0 or state_count > kalman_filter.state_count:
        raise ValueError("Invalid state count: {}".format(state_count))
    # No states to return?
    if state_count == 0:
        return []
    # Initialise with final posterior estimate: the smoothed estimate of the
    # last state equals its filtered (posterior) estimate.
    states = [None] * state_count
    states[-1] = kalman_filter.posterior_state_estimates[-1]
    priors = kalman_filter.prior_state_estimates
    posteriors = kalman_filter.posterior_state_estimates
    # Work backwards from final state
    for k in range(state_count - 2, -1, -1):
        process_mat = kalman_filter.process_matrices[k + 1]
        # Smoother gain: C_k = P_k|k . F^T . inv(P_{k+1|k}).
        cmat = posteriors[k].cov.dot(process_mat.T).dot(np.linalg.inv(priors[k + 1].cov))
        # Calculate smoothed state and covariance
        states[k] = MultivariateNormal(
            mean=posteriors[k].mean + cmat.dot(states[k + 1].mean - priors[k + 1].mean),
            cov=posteriors[k].cov + cmat.dot(states[k + 1].cov - priors[k + 1].cov).dot(cmat.T))
    return states
def _apply_hard_disk(unit_number, key, operation, disk_label=None, size=None, unit='GB', controller_key=None, thin_provision=None, eagerly_scrub=None, datastore=None, filename=None):
    '''Returns a vim.vm.device.VirtualDeviceSpec object specifying to add/edit
    a virtual disk device

    unit_number
        Address of the disk on its controller

    key
        Device key number

    operation
        Action which should be done on the device: 'add' or 'edit'

    disk_label
        Label of the new disk, can be overridden

    size
        Size of the disk

    unit
        Unit of the size, can be GB, MB, KB

    controller_key
        Unique number of the controller key

    thin_provision
        Boolean for thin provision

    eagerly_scrub
        Boolean for eagerly scrubbing

    datastore
        Datastore name where the disk will be located

    filename
        Full file name of the vm disk
    '''
    log.trace('Configuring hard disk %s size=%s, unit=%s, controller_key=%s, '
              'thin_provision=%s, eagerly_scrub=%s, datastore=%s, filename=%s',
              disk_label, size, unit, controller_key, thin_provision,
              eagerly_scrub, datastore, filename)
    disk_spec = vim.vm.device.VirtualDeviceSpec()
    disk_spec.device = vim.vm.device.VirtualDisk()
    disk_spec.device.key = key
    disk_spec.device.unitNumber = unit_number
    disk_spec.device.deviceInfo = vim.Description()
    if size:
        # Normalize the requested size to KB, which vSphere expects.
        convert_size = salt.utils.vmware.convert_to_kb(unit, size)
        disk_spec.device.capacityInKB = convert_size['size']
    if disk_label:
        disk_spec.device.deviceInfo.label = disk_label
    if thin_provision is not None or eagerly_scrub is not None:
        # Backing info is only built when a provisioning flag is supplied.
        # NOTE(review): the 'add' branch below dereferences
        # disk_spec.device.backing unconditionally -- presumably callers
        # always pass one of these flags for 'add'; verify against callers.
        disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.diskMode = 'persistent'
    if thin_provision is not None:
        disk_spec.device.backing.thinProvisioned = thin_provision
    if eagerly_scrub is not None and eagerly_scrub != 'None':
        disk_spec.device.backing.eagerlyScrub = eagerly_scrub
    if controller_key:
        disk_spec.device.controllerKey = controller_key
    if operation == 'add':
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        # VMDK paths take the '[datastore] filename' form.
        disk_spec.device.backing.fileName = '[{0}] {1}'.format(salt.utils.vmware.get_managed_object_name(datastore), filename)
        disk_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
    elif operation == 'edit':
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
    return disk_spec
def update_cors_configuration(self, enable_cors=True, allow_credentials=True, origins=None, overwrite_origins=False):
    """Merges existing CORS configuration with updated values.

    :param bool enable_cors: Enables/disables CORS. Defaults to True.
    :param bool allow_credentials: Allows authentication credentials.
        Defaults to True.
    :param list origins: List of allowed CORS origin(s). Special cases are
        a list containing a single "*" which will allow any origin and
        an empty list which will not allow any origin. Defaults to None.
    :param bool overwrite_origins: Dictates whether the origins list is
        overwritten or appended to. Defaults to False.
    :returns: CORS configuration update status in JSON format
    """
    requested = {
        'enable_cors': enable_cors,
        'allow_credentials': allow_credentials,
        'origins': origins if origins is not None else [],
    }
    # Overwrite mode discards the stored origins entirely.
    if overwrite_origins:
        return self._write_cors_configuration(requested)
    current = self.cors_configuration()
    # Merge mode: booleans are replaced, origins are unioned.
    merged = current.copy()
    merged['enable_cors'] = requested.get('enable_cors')
    merged['allow_credentials'] = requested.get('allow_credentials')
    if requested.get('origins') == ["*"]:
        # The wildcard supersedes any explicit origin list.
        merged['origins'] = ["*"]
    elif current.get('origins') != requested.get('origins'):
        combined = set(current.get('origins')).union(set(requested.get('origins')))
        merged['origins'] = list(combined)
    return self._write_cors_configuration(merged)
def list_filepaths(self, wildcard=None):
    """Return the list of absolute filepaths in the directory.

    Args:
        wildcard: String of tokens separated by "|". Each token represents a pattern.
            If wildcard is not None, we return only those files that match the given shell pattern (uses fnmatch).

    Example:
        wildcard="*.nc|*.pdf" selects only those files that end with .nc or .pdf

    Returns:
        list of str: Paths of the regular files found in ``self.path``
        (directories are excluded).
    """
    # Select the files in the directory.
    # BUG FIX: the original returned the lazy ``filter`` iterator on
    # Python 3 when no wildcard was given, contradicting the documented
    # list return type; results are now materialized as real lists.
    candidates = [os.path.join(self.path, f) for f in os.listdir(self.path)]
    filepaths = [p for p in candidates if os.path.isfile(p)]
    # Filter using the shell patterns.
    if wildcard is not None:
        filepaths = WildCard(wildcard).filter(filepaths)
    return filepaths
def get_params_from_page(path, file_name, method_count):
    """This function accesses the rendered content.

    We must do this because how the params are not defined in the docs,
    but rather the rendered HTML.

    :param path: Directory of the docs source; the rendered page is looked
        up relative to it under ``../_build/html/endpoints``.
    :param file_name: Name of the .rst file whose rendered page is parsed.
    :param method_count: Index of the method section within the page.
    :returns: List of parameter dicts (name/type/required/description),
        with required parameters sorted first.
    """
    # Open the rendered file.
    file_name = file_name.replace(".rst", "")
    file_path = "{0}/../_build/html/endpoints/{1}/index.html".format(path, file_name)
    # BUG FIX: the original passed ``open(file_path)`` straight to
    # BeautifulSoup and never closed the handle; a context manager now
    # closes it deterministically.
    with open(file_path) as html_file:
        soup = bs4.BeautifulSoup(html_file)
    # Pull out the relevant section
    section = soup.find_all('div', class_='section')[method_count]
    # get the tbody of the params table
    tbody = section.find('tbody')
    params = []
    if tbody is not None:
        for row in tbody.find_all('tr'):
            name, param_type, required, description = row.find_all('td')
            required = required.text == 'Yes'
            param = dict(name=name.text, type=param_type.text, required=required, description=description.text)
            params.append(param)
    # Required parameters sort before optional ones (False < True).
    params = sorted(params, key=lambda k: not k['required'])
    return params
def delete_tags(filesystemid, tags, keyid=None, key=None, profile=None, region=None, **kwargs):
    '''Deletes the specified tags from a file system.

    filesystemid
        (string) - ID of the file system for whose tags will be removed.

    tags
        (list[string]) - The tag keys to delete to the file system

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' boto_efs.delete_tags
    '''
    # Build an EFS client with the supplied credentials/region and delegate
    # to the DeleteTags API.
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)
    client.delete_tags(FileSystemId=filesystemid, Tags=tags)
def undo_sign_in(entry, session=None):
    """Delete a signed in entry.

    :param entry: `models.Entry` object. The entry to delete.
    :param session: (optional) SQLAlchemy session through which to access the database.
    :raises ValueError: If no stored entry matches ``entry.uuid``.
    """  # noqa
    # Dead-code fix: the original had an ``else: session = session`` branch,
    # which was a no-op; only the None case needs handling.
    if session is None:
        session = Session()
    entry_to_delete = (
        session.query(Entry)
        .filter(Entry.uuid == entry.uuid)
        .one_or_none()
    )
    if entry_to_delete:
        logger.info('Undo sign in: {}'.format(entry_to_delete.user_id))
        logger.debug('Undo sign in: {}'.format(entry_to_delete))
        session.delete(entry_to_delete)
        session.commit()
    else:
        # Surface a clear error rather than silently doing nothing.
        error_message = 'Entry not found: {}'.format(entry)
        logger.error(error_message)
        raise ValueError(error_message)
def stratified_resample(weights):
    """Performs the stratified resampling algorithm used by particle filters.

    This algorithm aims to make selections relatively uniformly across the
    particles. It divides the cumulative sum of the weights into N equal
    divisions, and then selects one particle randomly from each division. This
    guarantees that each sample is between 0 and 2/N apart.

    Parameters
    ----------
    weights : list-like of float
        list of weights as floats

    Returns
    -------
    indexes : ndarray of ints
        array of indexes into the weights defining the resample. i.e. the
        index of the zeroth resample is indexes[0], etc.
    """
    n_particles = len(weights)
    # One uniformly-random position inside each of the N equal divisions.
    positions = (random(n_particles) + range(n_particles)) / n_particles
    indexes = np.zeros(n_particles, 'i')
    cumulative_sum = np.cumsum(weights)
    sample_idx, division_idx = 0, 0
    # Walk both sequences forward: each position selects the first particle
    # whose cumulative weight exceeds it.
    while sample_idx < n_particles:
        if positions[sample_idx] < cumulative_sum[division_idx]:
            indexes[sample_idx] = division_idx
            sample_idx += 1
        else:
            division_idx += 1
    return indexes
def post(self, **kwargs):
    """Send a POST request to the currently loaded website's URL.

    The browser will automatically fill out the form. If `data` dict has
    been passed into ``kwargs``, the contained input values will override
    the automatically filled out values.

    Returns:
        `Response` object of a successful request.

    Raises:
        NoWebsiteLoadedError: If no website is currently loaded.
    """
    if self._url is None:
        raise NoWebsiteLoadedError('request submission requires a loaded website')
    form_data = kwargs.get('data', {})
    # Auto-fill every named form input from the page unless the caller
    # already supplied a value for it.
    for field in self.soup('form').select('input[name]'):
        field_name = field.get('name')
        if field_name not in form_data:
            form_data[field_name] = field.get('value', '')
    kwargs['data'] = form_data
    response = self.session.post(self._url, **kwargs)
    # Follow the response: subsequent requests target the final URL.
    self._url = response.url
    self._response = response
    return response
def execute_managed_notebook(cls, nb_man, kernel_name, log_output=False, start_timeout=60, execution_timeout=None, **kwargs):
    """Performs the actual execution of the parameterized notebook locally.

    Args:
        nb_man (NotebookNode): Executable notebook object.
        kernel_name (str): Name of kernel to execute the notebook against.
        log_output (bool): Flag for whether or not to write notebook output to stderr.
        start_timeout (int): Duration to wait for kernel start-up.
        execution_timeout (int): Duration to wait before failing execution (default: never).

    Note: The preprocessor concept in this method is similar to what is used
    by `nbconvert`, and it is somewhat misleading here. The preprocesser
    represents a notebook processor, not a preparation object.
    """
    preprocessor = PapermillExecutePreprocessor(
        timeout=execution_timeout,
        startup_timeout=start_timeout,
        kernel_name=kernel_name,
        log=logger,
    )
    preprocessor.log_output = log_output
    # Remaining keyword arguments are forwarded as the preprocess resources.
    preprocessor.preprocess(nb_man, kwargs)
def _step ( self , actions ) :
"""Takes a step in all environments , shouldn ' t pre - process or record .
Subclasses should override this to do the actual step if something other
than the default implementation is desired .
Args :
actions : ( np . ndarray ) with first dimension equal to the batch size .
Returns :
a tuple of stacked raw observations , raw rewards , dones and infos .""" | # Pre - conditions : common _ preconditions , see ` assert _ common _ preconditions ` .
# : len ( actions ) = = len ( self . _ envs )
self . assert_common_preconditions ( )
assert len ( actions ) == len ( self . _envs )
observations = [ ]
rewards = [ ]
dones = [ ]
infos = [ ]
# Take steps in all environments .
for env , action in zip ( self . _envs , actions ) :
observation , reward , done , info = env . step ( action )
observations . append ( observation )
rewards . append ( reward )
dones . append ( done )
infos . append ( info )
# Convert each list ( observations , rewards , . . . ) into np . array and return a
# tuple .
return tuple ( map ( np . stack , [ observations , rewards , dones , infos ] ) ) |
def module_entry(yfile):
    """Add entry for one file containing YANG module text.

    Parses the (sub)module, gathers its revision, features, includes and
    namespace, and records the result in the module-level ``submodmap`` or
    ``modmap`` registry.

    Args:
        yfile (file): File containing a YANG module or submodule.
    """
    text = yfile.read()
    module_stmt = ModuleParser(text).statement()
    is_submodule = module_stmt.keyword == "submodule"
    import_only = True
    revision = ""
    features = []
    includes = []
    entry = {}
    for sub in module_stmt.substatements:
        # NOTE: the elif ordering is significant -- each clause is only
        # evaluated when none of the earlier ones matched.
        if not revision and sub.keyword == "revision":
            # First revision statement wins.
            revision = sub.argument
        elif import_only and sub.keyword in data_kws:
            # Any data-definition statement means the module is not
            # import-only.
            import_only = False
        elif sub.keyword == "feature":
            features.append(sub.argument)
        elif is_submodule:
            # Submodules skip the namespace/include bookkeeping below.
            continue
        elif sub.keyword == "namespace":
            entry["namespace"] = sub.argument
        elif sub.keyword == "include":
            rev_date = sub.find1("revision-date")
            includes.append((sub.argument, rev_date.argument if rev_date else None))
    entry["import-only"] = import_only
    entry["features"] = features
    if is_submodule:
        entry["revision"] = revision
        submodmap[module_stmt.argument] = entry
    else:
        entry["includes"] = includes
        modmap[(module_stmt.argument, revision)] = entry
def _createStructure(self, linkResult, replaceParamFile):
    """Create GSSHAPY Structure Objects Method

    Builds a StreamLink from the parsed ``linkResult`` and attaches one
    Weir or Culvert object per parsed structure.  Curve-type structures
    (rating curves, scheduled releases, rule curves) are recognized but
    not instantiated.

    :param linkResult: Parsed link dict with 'header', 'type' and
        'structures' entries.
    :param replaceParamFile: Replacement-parameter file passed to ``vrp``
        to resolve parameterized values.
    :return: The populated StreamLink.
    """
    # Constants
    WEIRS = ('WEIR', 'SAG_WEIR')
    CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
    CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
    header = linkResult['header']
    # Initialize GSSHAPY StreamLink object
    link = StreamLink(linkNumber=header['link'], type=linkResult['type'], numElements=header['numstructs'])
    # Associate StreamLink with ChannelInputFile
    link.channelInputFile = self
    # Create Structure objects
    for s in linkResult['structures']:
        structType = s['structtype']
        # Cases
        if structType in WEIRS:  # Weir type handler
            # Initialize GSSHAPY Weir object; numeric fields go through
            # vrp() so replacement parameters are resolved.
            weir = Weir(type=structType, crestLength=vrp(s['crest_length'], replaceParamFile), crestLowElevation=vrp(s['crest_low_elev'], replaceParamFile), dischargeCoeffForward=vrp(s['discharge_coeff_forward'], replaceParamFile), dischargeCoeffReverse=vrp(s['discharge_coeff_reverse'], replaceParamFile), crestLowLocation=vrp(s['crest_low_loc'], replaceParamFile), steepSlope=vrp(s['steep_slope'], replaceParamFile), shallowSlope=vrp(s['shallow_slope'], replaceParamFile))
            # Associate Weir with StreamLink
            weir.streamLink = link
        elif structType in CULVERTS:  # Culvert type handler
            # Initialize GSSHAPY Culvert object
            culvert = Culvert(type=structType, upstreamInvert=vrp(s['upinvert'], replaceParamFile), downstreamInvert=vrp(s['downinvert'], replaceParamFile), inletDischargeCoeff=vrp(s['inlet_disch_coeff'], replaceParamFile), reverseFlowDischargeCoeff=vrp(s['rev_flow_disch_coeff'], replaceParamFile), slope=vrp(s['slope'], replaceParamFile), length=vrp(s['length'], replaceParamFile), roughness=vrp(s['rough_coeff'], replaceParamFile), diameter=vrp(s['diameter'], replaceParamFile), width=vrp(s['width'], replaceParamFile), height=vrp(s['height'], replaceParamFile))
            # Associate Culvert with StreamLink
            culvert.streamLink = link
        elif structType in CURVES:  # Curve type handler
            # Curve structures are not yet implemented.
            pass
    return link
def absolute_url(relative_url):
    """Returns an absolute URL from a URL relative to the server root.

    The base URL is taken from the Flask app config if present, otherwise it
    falls back to ``http://inspirehep.net``.
    """
    server = current_app.config.get('SERVER_NAME', 'http://inspirehep.net')
    # Prepend a scheme when the configured server name lacks one.
    if not re.match('^https?://', server):
        server = u'http://{}'.format(server)
    return urllib.parse.urljoin(server, relative_url)
def create(buildout_directory, buildout_extends):
    """Create buildout directory

    Resolves the target directory and the ``extends`` URL, creates the
    directory, and writes a minimal ``buildout.cfg`` into it via a
    temporary file.
    """
    # Resolve arguments
    directory = get_buildout_directory(buildout_directory)
    extends = get_buildout_extends(buildout_extends)
    # Create buildout directory
    local('mkdir -p {0:s}'.format(directory))
    # Create buildout.cfg
    filename = os.path.join(directory, 'buildout.cfg')
    contents = """\
[buildout]
extends = {0:s}
""".format(extends)
    # Write buildout.cfg through a temp file, then copy it into place.
    # BUG FIX: NamedTemporaryFile defaults to binary mode ('w+b'), so
    # writing a str raised TypeError on Python 3; mode='w' opens it in
    # text mode.  The context manager also guarantees cleanup.
    with NamedTemporaryFile(mode='w') as output:
        print("[localhost] create: {0:s}".format(output.name))
        output.write(contents)
        output.flush()
        local('cp {0:s} {1:s}'.format(output.name, filename))
        local('chmod a+r {0:s}'.format(filename))
def noise_op(latents, hparams):
    """Adds isotropic gaussian-noise to each latent.

    Args:
        latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC).
        hparams: HParams.  Uses ``latent_noise`` (noise stddev) and ``mode``.

    Returns:
        latents: latents with isotropic gaussian noise appended.
    """
    # Noise is applied only during training and only when a non-zero
    # stddev is configured; otherwise pass the latents through untouched.
    if hparams.latent_noise == 0 or hparams.mode != tf.estimator.ModeKeys.TRAIN:
        return latents
    latent_shape = common_layers.shape_list(latents)
    return latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise)
def _handle_request(self, request):
    """Find the resource to which a request maps and then call it.

    Instantiates, fills and returns a :class:`webob.Response` object. If
    no resource matches the request, the ``NOT_FOUND_RESOURCE`` is used
    (producing a 404 response).

    :param request: Object representing the current request.
    :type request: :class:`webob.Request`
    """
    response = webob.Response(request=request)
    path = request.path_info
    parsed = self._urlmap(path)
    if parsed:
        path_params, resource = parsed
    else:
        path_params, resource = {}, self.NOT_FOUND_RESOURCE
    instance = resource(request=request, response=response,
                        path_params=path_params, application=self)
    response = instance()
    if request.method == 'HEAD':
        # BUG FIX: WebOb requires the body to be bytes on Python 3;
        # assigning the str '' raises TypeError.
        response.body = b''
    return response
def _render_dataframe(dataframe):
    """Render a pandas DataFrame as an HTML table."""
    records = dataframe.to_dict(orient='records')
    columns = dataframe.columns.tolist()
    table = datalab.utils.commands.HtmlBuilder.render_table(records, columns)
    return IPython.core.display.HTML(table)
def get_facets(self):
    '''Returns a dictionary of facets::

        >>> res = solr.query('SolrClient_unittest', {
                'q': 'product_name:Lorem',
                'facet': True,
                'facet.field': 'facet_test',
            })
        >>> res.get_results_count()
        >>> res.get_facets()
        {'facet_test': {'ipsum': 0, 'sit': 0, 'dolor': 2, 'amet,': 1, 'Lorem': 1}}

    The result is cached on ``self.facets`` after the first call.

    :raises SolrResponseError: when the response carries no facet data.
    '''
    if not hasattr(self, 'facets'):
        self.facets = {}
        data = self.data
        if 'facet_counts' in data.keys() and type(data['facet_counts']) == dict:
            if 'facet_fields' in data['facet_counts'].keys() and type(data['facet_counts']['facet_fields']) == dict:
                for facetfield in data['facet_counts']['facet_fields']:
                    # BUG FIX: this previously read
                    # type(data[...][facetfield] == list), which calls
                    # type() on a boolean and is therefore always truthy.
                    if isinstance(data['facet_counts']['facet_fields'][facetfield], list):
                        # Solr returns facets as a flat [value, count, ...]
                        # list; pair them up into an ordered mapping.
                        l = data['facet_counts']['facet_fields'][facetfield]
                        self.facets[facetfield] = OrderedDict(zip(l[::2], l[1::2]))
                return self.facets
        else:
            raise SolrResponseError("No Facet Information in the Response")
    else:
        return self.facets
def ti(self):
    """Include the Threat Intel Module.

    .. Note:: Threat Intel methods can be accessed using ``tcex.ti.<method>``.
    """
    if self._ti is not None:
        return self._ti
    # Lazy import: only load the module on first access.
    from .tcex_ti import TcExTi

    self._ti = TcExTi(self)
    return self._ti
def stSpectralFlux(X, X_prev):
    """Compute the spectral flux feature of the current frame.

    ARGUMENTS:
        X:      the abs(fft) of the current frame
        X_prev: the abs(fft) of the previous frame
    """
    # Normalise each spectrum by its eps-regularised total energy, then
    # sum the squared differences between the two normalised spectra.
    norm_cur = X / numpy.sum(X + eps)
    norm_prev = X_prev / numpy.sum(X_prev + eps)
    return numpy.sum((norm_cur - norm_prev) ** 2)
def go_to_background():
    """Daemonize the running process by forking and exiting the parent."""
    try:
        pid = os.fork()
    except OSError as errmsg:
        LOGGER.error('Fork failed: {0}'.format(errmsg))
        sys.exit('Fork failed')
    if pid:
        # Parent process: exit so only the forked child keeps running.
        # (sys.exit raises SystemExit, which the OSError handler above
        # never catches, so this is equivalent to exiting inside the try.)
        sys.exit()
def read_data(self, variable_instance):
    """Read the current value for *variable_instance* from the VISA device.

    Returns None when no instrument session is open; otherwise queries the
    instrument with a SCPI command derived from the variable's
    ``device_property`` and returns the parsed value.
    """
    # No open instrument session -- nothing to read.
    if self.inst is None:
        return
    # Optional ':FUNC' variable property selects the measurement function
    # to prepend to per-channel queries below.
    vp_func = variable_instance.variableproperty_set.filter(name=':FUNC').first()
    measure_function = ''
    if vp_func:
        if vp_func.value():
            measure_function = ':FUNC "%s";' % vp_func.value()
    # Trigger delay (seconds) applied to per-channel reads.
    trig_delay = 0.1
    # Plain present-value read: fetch the latest measurement.
    if variable_instance.visavariable.device_property.upper() == 'PRESENT_VALUE':
        return self.parse_value(self.inst.query(':FETCH?'))
    # Per-channel read: 'PRESENT_VALUE_CH<n>' closes the routed channel n,
    # applies the measurement function and trigger delay, then fetches.
    m = re.search('(PRESENT_VALUE_CH)([0-9]*)', variable_instance.visavariable.device_property.upper())
    if m:
        return self.parse_value(self.inst.query(':route:close (@%s);%s:TRIG:DEL %1.3f;:fetch?' % (m.group(2), measure_function, trig_delay)))
    # Fallback: treat the device_property itself as the raw query string.
    return self.parse_value(self.inst.query(variable_instance.visavariable.device_property.upper()))
def get_tree_collection_strings(self, scale=1, guide_tree=None):
    """Return the input strings for tree_collection.

    tree_collection needs distvar, genome_map and labels - these are
    returned in the order above.
    """
    selected_records = [self.collection[index] for index in self.indices]
    return TreeCollectionTaskInterface().scrape_args(selected_records)
def organize_commands(corrected_commands):
    """Yields sorted commands without duplicates.

    The first command is yielded immediately (so the UI can show it without
    waiting), then the remaining commands are deduplicated and yielded in
    priority order.

    :type corrected_commands: Iterable[thefuck.types.CorrectedCommand]
    :rtype: Iterable[thefuck.types.CorrectedCommand]
    """
    try:
        first_command = next(corrected_commands)
        yield first_command
    except StopIteration:
        return
    without_duplicates = {
        command
        for command in sorted(corrected_commands, key=lambda command: command.priority)
        if command != first_command}
    sorted_commands = sorted(
        without_duplicates, key=lambda corrected_command: corrected_command.priority)
    # BUG FIX: the format string previously had no '{}' placeholder, so the
    # joined command list was silently dropped from the debug message.
    logs.debug('Corrected commands: {}'.format(
        ', '.join(u'{}'.format(cmd) for cmd in [first_command] + sorted_commands)))
    for command in sorted_commands:
        yield command
def named_config(name: str, config_dict: typing.Mapping) -> None:
    """Add a named config to the config registry.

    The first argument may either be a string or a collection of strings.
    This function should be called in a .konchrc file.
    """
    is_collection = isinstance(name, Iterable) and not isinstance(name, (str, bytes))
    names = name if is_collection else [name]
    for each in names:
        _config_registry[each] = Config(**config_dict)
def _next_cTn_id ( self ) :
"""Return the next available unique ID ( int ) for p : cTn element .""" | cTn_id_strs = self . xpath ( '/p:sld/p:timing//p:cTn/@id' )
ids = [ int ( id_str ) for id_str in cTn_id_strs ]
return max ( ids ) + 1 |
def get_dataset(self):
    # type: () -> hdx.data.dataset.Dataset
    """Return the dataset containing this resource.

    Returns:
        hdx.data.dataset.Dataset: Dataset containing this resource

    Raises:
        HDXError: if this resource carries no package id
    """
    dataset_id = self.data.get('package_id')
    if dataset_id is None:
        raise HDXError('Resource has no package id!')
    return hdx.data.dataset.Dataset.read_from_hdx(dataset_id)
def write_dividend_data(self, dividends, stock_dividends=None):
    """Write dividend payouts and the derived price adjustment ratios."""
    # Persist the raw payout records first...
    self._write_dividends(dividends)
    self._write_stock_dividends(stock_dividends)
    # ...then derive the price-adjustment ratios from them and store those.
    ratios = self.calc_dividend_ratios(dividends)
    self.write_frame('dividends', ratios)
def get_global_vars(func):
    """Store any methods or variables bound from the function's closure.

    Args:
        func (function): function to inspect

    Returns:
        dict: mapping with keys 'modules' (name -> module name),
        'functions' (name -> function object) and 'vars'
        (name -> globally bound value)

    Raises:
        TypeError: if the function closes over nonlocal variables, which
            cannot be shipped with a job
    """
    closure = getclosurevars(func)
    if closure['nonlocal']:
        # BUG FIX: the error path previously indexed closure['nonlocals']
        # (trailing "s") -- a key the check above shows does not match --
        # so building the error message itself raised KeyError.
        raise TypeError("Can't launch a job with closure variables: %s"
                        % closure['nonlocal'].keys())
    globalvars = dict(modules={}, functions={}, vars={})
    for name, value in closure['global'].items():
        if inspect.ismodule(value):  # TODO: deal with functions from closure
            globalvars['modules'][name] = value.__name__
        elif inspect.isfunction(value) or inspect.ismethod(value):
            globalvars['functions'][name] = value
        else:
            globalvars['vars'][name] = value
    return globalvars
def get_msms_df(model, pdb_id, outfile=None, outdir=None, outext='_msms.df', force_rerun=False):
    """Run MSMS (using Biopython) on a Biopython Structure Model.

    Depths are in units Angstroms. 1A = 10^-10 m = 1nm. Returns a dictionary of::

        chain_id: {
            resnum1_id: (res_depth, ca_depth),
            resnum2_id: (res_depth, ca_depth)
        }

    Args:
        model: Biopython Structure Model
        pdb_id: identifier used to build the cached output file name
        outfile: optional explicit output file name
        outdir: optional output directory
        outext: suffix appended when outfile is not given
        force_rerun: when True, rerun MSMS even if a cached file exists

    Returns:
        Pandas DataFrame: ResidueDepth property_dict, reformatted into
        columns [chain, resnum, icode, res_depth, ca_depth]; an empty
        DataFrame if MSMS fails to run.
    """
    # XTODO: need to deal with temporary surface/vertex files in tmp directory when running on a large scale --
    # XTODO: will run into inode limits! Also, some valuable information is in these MSMS output files that we should save.
    # Create the output file name
    outfile = ssbio.utils.outfile_maker(inname=pdb_id, outname=outfile, outdir=outdir, outext=outext)
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):  # Run MSMS with Biopython
        try:
            rd = PDB.ResidueDepth(model)
        except AssertionError:
            # Biopython surfaces MSMS failures as AssertionError here.
            log.error('{}: unable to run MSMS'.format(pdb_id))
            return pd.DataFrame()
        # Reorganize the results into a csv file
        appender = []
        for k in rd.property_keys:
            x = rd.property_dict[k]
            # Key layout: (chain_id, (hetfield, resseq, icode)) --
            # TODO confirm against the installed Biopython version.
            chain = k[0]
            residue = k[1]
            het = residue[0]
            resnum = residue[1]
            icode = residue[2]
            resdepth = x[0]
            cadepth = x[1]
            appender.append((chain, resnum, icode, resdepth, cadepth))
        df = pd.DataFrame.from_records(appender, columns=['chain', 'resnum', 'icode', 'res_depth', 'ca_depth'])
        df.to_csv(outfile)
    else:
        # Cached result exists -- load it instead of rerunning MSMS.
        log.debug('{}: already ran MSMS and force_rerun={}, loading results'.format(outfile, force_rerun))
        df = pd.read_csv(outfile, index_col=0)
    return df
def constant_coefficients(d, timelines, constant=True, independent=0):
    """Proportional hazards model.

    d: the dimension of the dataset
    timelines: the observational times
    constant: True for constant coefficients
    independent: the number of coefficients to set to 0 (covariate is
        independent of survival), or a list of covariates to make
        independent.

    returns a matrix (t, d+1) of coefficients
    """
    # Delegate to the time-varying generator, sampling from a normal
    # distribution; `constant` keeps the coefficients fixed over time.
    return time_varying_coefficients(
        d, timelines, constant, independent=independent, randgen=random.normal)
def index(self, prefix):
    """Return the model index for a prefix, or None when no type matches."""
    # Any web domain will be handled by the standard URLField.
    if self.is_external_url_prefix(prefix):
        prefix = 'http'
    matches = (position for position, urltype in enumerate(self._url_types)
               if urltype.prefix == prefix)
    return next(matches, None)
def get(self, symbol):
    """Gets a Symbol based on name, which is expected to exist.

    Parameters
    ----------
    symbol : str or Symbol

    Returns
    -------
    Symbol

    Raises
    ------
    Exception
        If it does not exist. Use .try_to_get(), if the symbol may or may
        not exist.
    """
    found = self.try_to_get(symbol)
    if found is not None:
        return found
    raise Exception("Symbol {} does not exist".format(symbol))
def increment(self, key, cache=None, amount=1):
    """Query the server to increment the value of the key by the specified
    amount. Negative amounts can be used to decrement.

    Keyword arguments:
    key -- the key the item is stored under. Required.
    cache -- the cache the item belongs to. Defaults to None, which uses
             self.name. If no name is set, raises a ValueError.
    amount -- the amount to increment the value by. Can be negative to
              decrement the value. Defaults to 1.
    """
    cache_name = self.name if cache is None else cache
    if cache_name is None:
        raise ValueError("Cache name must be set")
    # URL-encode both path components before building the endpoint.
    cache_name = quote_plus(cache_name)
    encoded_key = quote_plus(key)
    payload = json.dumps({"amount": amount})
    response = self.client.post(
        "caches/%s/items/%s/increment" % (cache_name, encoded_key),
        payload,
        {"Content-Type": "application/json"})
    return Item(values=response["body"], cache=cache_name, key=encoded_key)
def subprocess(self):
    """Retrieve the subprocess in which this activity is defined.

    If this is a task on top level, it raises NotFoundError.

    :return: a subprocess :class:`Activity`
    :raises NotFoundError: when it is a task in the top level of a project
    :raises APIError: when other error occurs

    Example
    -------
    >>> task = project.activity('Subtask')
    >>> subprocess = task.subprocess()
    """
    container_id = self._json_data.get('container')
    # A container equal to the root container means the task sits at the
    # top level and therefore has no parent subprocess.
    if container_id == self._json_data.get('root_container'):
        raise NotFoundError("Cannot find subprocess for this task '{}', "
                            "as this task exist on top level.".format(self.name))
    return self._client.activity(pk=container_id, scope=self.scope_id)
def set(self, key, *args):
    """Store *args* in the cache under the hashed form of *key*."""
    hashed_key = self._hashed(key)
    return self.cache.set(hashed_key, *args)
def connectivity_array(self):
    """Provides connectivity array.

    Returns:
        connectivity: An array of shape [atomi, atomj, imagej]. atomi is
            the index of the atom in the input structure. Since the second
            atom can be outside of the unit cell, it must be described
            by both an atom index and an image index. Array data is the
            solid angle of polygon between atomi and imagej of atomj
    """
    # shape = [site, axis]
    cart_coords = np.array(self.s.cart_coords)
    # shape = [site, image, axis] -- every site replicated at every offset.
    all_sites = cart_coords[:, None, :] + self.cart_offsets[None, :, :]
    vt = Voronoi(all_sites.reshape((-1, 3)))
    n_images = all_sites.shape[1]
    cs = (len(self.s), len(self.s), len(self.cart_offsets))
    connectivity = np.zeros(cs)
    vts = np.array(vt.vertices)
    for (ki, kj), v in vt.ridge_dict.items():
        # Recover (atom index, image index) from the flattened Voronoi
        # point index: points were laid out site-major, image-minor.
        atomi = ki // n_images
        atomj = kj // n_images
        imagei = ki % n_images
        imagej = kj % n_images
        # Only ridges touching the central image (index n_images // 2)
        # matter; skip ridges between two non-central images.
        if imagei != n_images // 2 and imagej != n_images // 2:
            continue
        if imagei == n_images // 2:  # atomi is in original cell
            val = solid_angle(vt.points[ki], vts[v])
            connectivity[atomi, atomj, imagej] = val
        if imagej == n_images // 2:  # atomj is in original cell
            val = solid_angle(vt.points[kj], vts[v])
            connectivity[atomj, atomi, imagei] = val
        # NOTE(review): -10.101 looks like a sentinel coordinate marking a
        # vertex of an unbounded Voronoi cell -- confirm where it is set.
        if -10.101 in vts[v]:
            warn('Found connectivity with infinite vertex. '
                 'Cutoff is too low, and results may be '
                 'incorrect')
    return connectivity
def benchmark_mitdb_record(rec, detector, verbose):
    """Benchmark a QRS detector on a single mitdb record.

    Reads the first signal channel and the 'atr' reference annotations,
    runs *detector* on it and compares the detections against the
    reference samples (first reference sample excluded) with a 100 ms
    matching window.
    """
    sig, fields = rdsamp(rec, pb_dir='mitdb', channels=[0])
    ann_ref = rdann(rec, pb_dir='mitdb', extension='atr')
    detections = detector(sig=sig[:, 0], fs=fields['fs'], verbose=verbose)
    window = int(0.1 * fields['fs'])
    comparitor = compare_annotations(ref_sample=ann_ref.sample[1:],
                                     test_sample=detections,
                                     window_width=window)
    if verbose:
        print('Finished record %s' % rec)
    return comparitor
def update(self, password=values.unset):
    """Update the CredentialInstance.

    :param unicode password: The password will not be returned in the response

    :returns: Updated CredentialInstance
    :rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialInstance
    """
    data = values.of({'Password': password})
    payload = self._version.update('POST', self._uri, data=data)
    solution = self._solution
    return CredentialInstance(
        self._version,
        payload,
        account_sid=solution['account_sid'],
        credential_list_sid=solution['credential_list_sid'],
        sid=solution['sid'],
    )
def generate(env):
    """Add Builders and construction variables for C compilers to an Environment."""
    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    # Register compile actions and object emitters for every C suffix,
    # for both static and shared object builders.
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)
        static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter)
        shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter)
    add_common_cc_variables(env)
    # Pick the first compiler detected on the system, falling back to the
    # head of the candidate list.
    if 'CC' not in env:
        env['CC'] = env.Detect(compilers) or compilers[0]
    env['CFLAGS'] = SCons.Util.CLVar('')
    env['CCCOM'] = '$CC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
    # Shared-object compilation reuses the same compiler and flags.
    env['SHCC'] = '$CC'
    env['SHCFLAGS'] = SCons.Util.CLVar('$CFLAGS')
    env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCFLAGS $SHCCFLAGS $_CCCOMCOM $SOURCES'
    # Prefix/suffix strings used when expanding preprocessor defines and
    # include paths into command-line options.
    env['CPPDEFPREFIX'] = '-D'
    env['CPPDEFSUFFIX'] = ''
    env['INCPREFIX'] = '-I'
    env['INCSUFFIX'] = ''
    env['SHOBJSUFFIX'] = '.os'
    env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0
    env['CFILESUFFIX'] = '.c'
def attempt_social_login(self, provider, id):
    """Attempt social login and return boolean result."""
    if not provider or not id:
        return False
    # Look the user up by the provider-specific id column, e.g. github_id.
    lookup = {provider.lower() + '_id': id}
    user = self.first(**lookup)
    if not user:
        return False
    self.force_login(user)
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.