signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _insert_one(self, doc, ordered, check_keys, manipulate, write_concern,
                op_id, bypass_doc_val, session):
    """Internal helper for inserting a single document."""
    if manipulate:
        doc = self.__database._apply_incoming_manipulators(doc, self)
        if not isinstance(doc, RawBSONDocument) and '_id' not in doc:
            doc['_id'] = ObjectId()
        doc = self.__database._apply_incoming_copying_manipulators(doc, self)
    write_concern = write_concern or self.write_concern
    acknowledged = write_concern.acknowledged
    command = SON([('insert', self.name),
                   ('ordered', ordered),
                   ('documents', [doc])])
    if not write_concern.is_server_default:
        command['writeConcern'] = write_concern.document

    def _insert_command(session, sock_info, retryable_write):
        # Fall back to legacy OP_INSERT for unacknowledged writes on
        # connections without OP_MSG support.
        if not sock_info.op_msg_enabled and not acknowledged:
            return self._legacy_write(
                sock_info, 'insert', command, op_id, bypass_doc_val,
                message.insert, self.__full_name, [doc], check_keys,
                False, write_concern.document, False,
                self.__write_response_codec_options)
        if bypass_doc_val and sock_info.max_wire_version >= 4:
            command['bypassDocumentValidation'] = True
        result = sock_info.command(
            self.__database.name, command,
            write_concern=write_concern,
            codec_options=self.__write_response_codec_options,
            check_keys=check_keys, session=session,
            client=self.__database.client,
            retryable_write=retryable_write)
        _check_write_command_response(result)

    self.__database.client._retryable_write(
        acknowledged, _insert_command, session)
    if not isinstance(doc, RawBSONDocument):
        return doc.get('_id')
def remove_key(pki_dir, id_):
    '''This method removes a specified key from the accepted keys dir'''
    key_path = os.path.join(pki_dir, 'minions', id_)
    # No-op when the minion's key file does not exist.
    if os.path.isfile(key_path):
        os.remove(key_path)
        log.debug('Deleted \'%s\'', key_path)
def load_combo_catalog():
    """Load a union of the user and global catalogs for convenience"""
    desc = 'Generated from data packages found on your intake search path'
    cat_dirs = []
    # Glob patterns for every existing catalog directory (user first,
    # then global), matching both .yaml and .yml files.
    for base_dir in (user_data_dir(), global_data_dir()):
        if os.path.isdir(base_dir):
            cat_dirs.append(base_dir + '/*.yaml')
            cat_dirs.append(base_dir + '/*.yml')
    for path_dir in conf.get('catalog_path', []):
        if path_dir == '':
            continue
        if path_dir.endswith(('yaml', 'yml')):
            # Already a concrete catalog file path.
            cat_dirs.append(path_dir)
        else:
            cat_dirs.append(path_dir + '/*.yaml')
            cat_dirs.append(path_dir + '/*.yml')
    return YAMLFilesCatalog(cat_dirs, name='builtin', description=desc)
async def await_rpc(self, address, rpc_id, *args, **kwargs):
    """Send an RPC from inside the EmulationLoop.

    The RPC is queued for the dispatch task and this coroutine blocks
    until it finishes.  **Must only be called from inside the
    EmulationLoop.**

    Args:
        address (int): The address of the tile that has the RPC.
        rpc_id (int): The 16-bit id of the rpc we want to call.
        *args: Any required arguments for the RPC as python objects.
        **kwargs: Only ``arg_format`` and ``resp_format`` are supported,
            format specifiers for the argument list and the result.

    Returns:
        list: A list of the decoded response members from the RPC.
    """
    self.verify_calling_thread(
        True, "await_rpc must be called from **inside** the event loop")

    if isinstance(rpc_id, RPCDeclaration):
        arg_format = rpc_id.arg_format
        resp_format = rpc_id.resp_format
        rpc_id = rpc_id.rpc_id
    else:
        arg_format = kwargs.get('arg_format', None)
        resp_format = kwargs.get('resp_format', None)

    arg_payload = b''
    if arg_format is not None:
        arg_payload = pack_rpc_payload(arg_format, args)

    self._logger.debug("Sending rpc to %d:%04X, payload=%s",
                       address, rpc_id, args)

    response = AwaitableResponse()
    self._rpc_queue.put_rpc(address, rpc_id, arg_payload, response)

    try:
        resp_payload = await response.wait(1.0)
    except RPCRuntimeError as err:
        resp_payload = err.binary_error

    if resp_format is None:
        return []

    return unpack_rpc_payload(resp_format, resp_payload)
def create_table(table, data):
    """Create a ClickHouse table with the given name and fields.

    ``data['fields']`` maps column name -> (type spec, indexed flag),
    e.g. ``{'age': ('int(32, signed)', 'yes')}``.  A ``date`` column and
    a MergeTree engine clause are always appended, with the indexed
    columns forming the primary key.

    :return: None
    """
    fields = data['fields']
    query = '('
    indexed_fields = ''
    for key, value in fields.items():
        type_spec = value[0]
        # Slice up to the first '('.  NOTE: for a bare 'string' (no
        # parenthesis) find() returns -1, so this yields 'strin' --
        # which is why the comparison below matches 'strin'.
        non_case_field = type_spec[0:type_spec.find('(')]
        if non_case_field == 'int':
            sign = type_spec[type_spec.find(',') + 1:-1:].strip()
            if sign == 'signed':
                field_type = 'Int'
            else:
                field_type = 'UInt'
            # Raw string: '\d' in a plain literal is an invalid escape.
            bits = re.findall(r'\d+', type_spec)[0]
            query += key + ' ' + field_type + bits + ','
        elif non_case_field == 'strin':
            query += key + ' ' + 'String' + ','
        elif non_case_field == 'float':
            bits = re.findall(r'\d+', type_spec)[0]
            query += key + ' ' + 'Float' + bits + ','
        if value[1] == 'yes':
            indexed_fields += key + ','
    query = query[:-1] + f",date Date) ENGINE = MergeTree(date, ({indexed_fields} date), 8192)"
    client.execute(f"CREATE TABLE {table} {query}")
def get_objective_bank_hierarchy_design_session(self):
    """Gets the session designing objective bank hierarchies.

    return: (osid.learning.ObjectiveBankHierarchyDesignSession)
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - supports_objective_bank_hierarchy_design()
        is false
    compliance: optional - must be implemented if
        supports_objective_bank_hierarchy_design() is true.
    """
    if not self.supports_objective_bank_hierarchy_design():
        raise Unimplemented()
    # The sessions module is imported lazily; a missing module means the
    # provider cannot fulfil the request.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        session = sessions.ObjectiveBankHierarchyDesignSession(
            runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
def show(self, user, identity):
    """Show the specified identity for the specified user.

    :param user: user id or User object
    :param identity: identity id object
    :return: Identity
    """
    endpoint_path = self.endpoint.show(user, identity)
    return self._get(self._build_url(endpoint_path))
def user_list(self, userid, cur_p=''):
    '''List the entities of the user.'''
    # Page numbers are 1-based; clamp anything below 1.
    page = int(cur_p) if cur_p else 1
    if page < 1:
        page = 1
    kwd = {'current_page': page}
    recs = MEntity2User.get_all_pager_by_username(
        userid, current_page_num=page).objects()
    self.render('misc/entity/entity_user_download.html',
                imgs=recs,
                cfg=config.CMS_CFG,
                kwd=kwd,
                userinfo=self.userinfo)
def err(msg, level=-1, prefix=True):
    """Prints the specified message as an error; prepends "ERROR" to
    the message, so that can be left off."""
    if will_print(level) or verbosity is None:
        text = ("ERROR: " if prefix else "") + msg
        printer(text, "red")
def on_click(self, event):
    """Click events.

    - left click & scroll up/down: switch between rotations
    - right click: apply selected rotation
    """
    button = event["button"]
    if button in [1, 4, 5]:
        # Mark that the user is cycling through rotations.
        self.scrolling = True
        self._switch_selection()
    elif button == 3:
        self._apply()
def all_modules_subpattern():
    u"""Builds a pattern for all toplevel names (urllib, http, etc)"""
    names_dot_attrs = [mod.split(u".") for mod in MAPPING]
    dotted = [dotted_name % (simple_name % (mod[0]),
                             simple_attr % (mod[1]))
              for mod in names_dot_attrs]
    # Package __init__ modules are also matched by their bare name.
    bare = [simple_name % (mod[0])
            for mod in names_dot_attrs if mod[1] == u"__init__"]
    ret = u"( " + u" | ".join(dotted)
    ret += u" | "
    ret += u" | ".join(bare) + u" )"
    return ret
def log(obj1, obj2, sym, cname=None, aname=None, result=None):  # pylint: disable=R0913
    """Log the objects being compared and the result.

    When no result object is specified, subsequent calls have an
    increased indentation level; the level is decreased once a result
    object is provided.

    @param obj1: first object
    @param obj2: second object
    @param sym: operation being performed ('==' or '%')
    @param cname: name of class (when attributes are being compared)
    @param aname: name of attribute (when attributes are being compared)
    @param result: outcome of comparison
    """
    fmt = "{o1} {sym} {o2} : {r}"
    if cname or aname:
        # Both must be specified together.
        assert cname and aname
        fmt = "{c}.{a}: " + fmt
    if result is None:
        # No outcome yet: log at current depth, then indent deeper calls.
        result = '...'
        fmt = _Indent.indent(fmt)
        _Indent.more()
    else:
        _Indent.less()
        fmt = _Indent.indent(fmt)
    msg = fmt.format(o1=repr(obj1), o2=repr(obj2), c=cname, a=aname,
                     sym=sym, r=result)
    logging.info(msg)
def set_column_si_format(tree_column, model_column_index, cell_renderer=None, digits=2):
    '''Set the text of a numeric cell according to [SI prefixes][1].

    For example, ``1000 -> '1.00k'``.

    [1]: https://en.wikipedia.org/wiki/Metric_prefix#List_of_SI_prefixes

    Args:
        tree_column (gtk.TreeViewColumn): Tree view column.
        model_column_index (int): Index in list store model corresponding
            to tree view column.
        cell_renderer (gtk.CellRenderer): Cell renderer for column.  If
            ``None``, defaults to all cell renderers for column.
        digits (int): Number of digits after decimal (default=2).

    Returns:
        None
    '''
    def set_property(column, renderer, list_store, iter, store_i):
        renderer.set_property('text',
                              si_format(list_store[iter][store_i], digits))

    if cell_renderer is None:
        cells = tree_column.get_cells()
    else:
        cells = [cell_renderer]
    for cell in cells:
        tree_column.set_cell_data_func(cell, set_property,
                                       model_column_index)
def integrate_to_file(what, filename, start_line, end_line):
    """Integrate ``what`` into ``filename`` between two marker lines.

    The region between ``start_line`` and ``end_line`` (exact line
    matches, newlines included) is replaced by ``what``; when the
    markers are absent, the marked region is appended at the end.  The
    rewrite goes through a temporary file that atomically replaces the
    original.

    Fixes in this revision:
    - the remainder after the end marker was previously written in
      reversed order (the cause of the old "works every second run"
      warning);
    - the temp file is opened in text mode (binary mode raised
      TypeError for ``str`` lines under Python 3);
    - the temp file is created next to the target so ``os.rename``
      cannot fail across filesystems.
    """
    try:
        with open(filename) as f:
            lines = f.readlines()
    except IOError:
        lines = []
    directory = os.path.dirname(os.path.abspath(filename))
    tmp_file = tempfile.NamedTemporaryFile('w', dir=directory, delete=False)
    try:
        remaining = iter(lines)
        # Copy everything before the start marker.
        for line in remaining:
            if line == start_line:
                break
            tmp_file.write(line)
        # Insert the (re)generated region.
        tmp_file.write(start_line)
        tmp_file.write(what)
        tmp_file.write(end_line)
        # Skip the old region up to (and including) the end marker.
        for line in remaining:
            if line == end_line:
                break
        # Copy the remainder in its original order.
        tmp_file.writelines(remaining)
    finally:
        tmp_file.close()
    os.rename(tmp_file.name, filename)
def _lookup_drill(name, rdtype, timeout=None, servers=None, secure=None):
    '''Use drill to lookup addresses.

    :param name: Name of record to search
    :param rdtype: DNS record type
    :param timeout: command return timeout
    :param servers: [] of servers to use
    :param secure: when truthy, require DNSSEC validation
    :return: [] of records or False if error
    '''
    cmd = 'drill '
    if secure:
        cmd += '-D -o ad '
    cmd += '{0} {1} '.format(rdtype, name)
    if servers:
        cmd += ''.join(['@{0} '.format(srv) for srv in servers])
    cmd = __salt__['cmd.run_all'](cmd, timeout=timeout,
                                  python_shell=False,
                                  output_loglevel='quiet')
    if cmd['retcode'] != 0:
        log.warning('drill returned (%s): %s', cmd['retcode'],
                    cmd['stderr'])
        return False

    lookup_res = iter(cmd['stdout'].splitlines())
    validated = False
    res = []
    try:
        # Skip forward to the answer section.
        line = ''
        while 'ANSWER SECTION' not in line:
            line = next(lookup_res)
        # Consume answer records until a blank/comment line.
        while True:
            line = next(lookup_res).strip()
            if not line or line.startswith(';;'):
                break
            l_type, l_rec = line.split(None, 4)[-2:]
            if l_type == 'CNAME' and rdtype != 'CNAME':
                continue
            elif l_type == 'RRSIG':
                # An RRSIG record marks the answer as DNSSEC-validated.
                validated = True
                continue
            elif l_type != rdtype:
                raise ValueError('Invalid DNS type {}'.format(rdtype))
            res.append(_data_clean(l_rec))
    except StopIteration:
        pass
    if res and secure and not validated:
        return False
    else:
        return res
def trigger_info(self, trigger=None, dump=False):
    """Get information about a trigger.

    Pass in a raw trigger to find out what file name and line number it
    appeared at.  Returns a list of matching triggers (with their
    topics, filenames and line numbers) or ``None`` if there weren't
    any matches.  Keys in each result dict: ``category`` ('topic' or
    'thats'), ``topic``, ``trigger``, ``filename``, ``line``.

    Pass a true value for ``dump`` to get the entire syntax tracking
    tree instead.

    :param str trigger: The raw trigger text to look up.
    :param bool dump: Whether to dump the entire syntax tracking tree.
    :return: A list of matching triggers or ``None`` if no matches.
    """
    if dump:
        return self._syntax

    response = None
    # Walk the syntax tree looking for the trigger.
    for category, topics in self._syntax.items():
        for topic, triggers in topics.items():
            if trigger not in triggers:
                continue
            if response is None:
                response = list()
            fname, lineno = triggers[trigger]['trigger']
            response.append(dict(category=category, topic=topic,
                                 trigger=trigger, filename=fname,
                                 line=lineno))
    return response
def load_progress(self, resume_step):
    """Load progress from the restoration file.

    Falls back to earlier steps when a restoration file is corrupted or
    absent; restarts the session entirely when no step is usable.

    Args: resume_step (str): step at which to resume session
    Returns: manager with progress from step
    """
    resume_step = Status[resume_step]
    progress_path = self.get_restore_path(resume_step)
    # If progress is corrupted, keep reverting to the step before.
    while not self.check_for_session(resume_step):
        config.LOGGER.error(
            "Ricecooker has not reached {0} status. Reverting to earlier step..."
            .format(resume_step.name))
        if resume_step.value - 1 < 0:
            # All files are corrupted or absent: restart the process.
            self.init_session()
            return self
        resume_step = Status(resume_step.value - 1)
        progress_path = self.get_restore_path(resume_step)
    config.LOGGER.error(
        "Starting from status {0}".format(resume_step.name))
    # Load the pickled manager.
    with open(progress_path, 'rb') as handle:
        manager = pickle.load(handle)
    if isinstance(manager, RestoreManager):
        return manager
    return self
def bn2float(module: nn.Module) -> nn.Module:
    "If `module` is batchnorm don't use half precision."
    # Recursively force any batchnorm layer back to float32, since
    # batchnorm statistics are unstable in half precision.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        module.float()
    for sub_module in module.children():
        bn2float(sub_module)
    return module
def urlopen(self, url, **kwargs):
    """GET a file-like object for a URL using HTTP.

    Thin wrapper around :meth:`requests.Session.get` that returns a
    file-like object wrapped around the resulting content.

    Parameters
    ----------
    url : str
        The URL to request
    kwargs : arbitrary keyword arguments
        Additional keyword arguments to pass to
        :meth:`requests.Session.get`.

    Returns
    -------
    fobj : file-like object
        A file-like interface to the content in the response
    """
    response = self.create_session().get(url, **kwargs)
    return BytesIO(response.content)
def _from_engine(cls, data, alias_list):
    """Return an alias for the engine.

    ``data`` is the dict provided when calling
    ``engine.alias_resolving()``; ``alias_list`` is the list of aliases
    pre-fetched from ``Alias.objects.all()``.  The Alias element is
    built by taking the alias_ref and finding the name in the alias
    list.  Returns ``None`` implicitly when no alias matches.

    :rtype: Alias
    """
    # The looked-up ref is loop-invariant; hoist it out of the loop.
    href = data.get('alias_ref')
    for alias in alias_list:
        if alias.href == href:
            _alias = Alias(alias.name, href=href)
            _alias.resolved_value = data.get('resolved_value')
            _alias.typeof = alias._meta.type
            return _alias
def _find_cgroup_mounts():
    """Return the information which subsystems are mounted where.

    @return a generator of tuples (subsystem, mountpoint)
    """
    try:
        with open('/proc/mounts', 'rt') as mounts_file:
            for raw_line in mounts_file:
                fields = raw_line.split(' ')
                # /proc/mounts columns: device mountpoint fstype options ...
                if fields[2] != 'cgroup':
                    continue
                mountpoint = fields[1]
                for option in fields[3].split(','):
                    if option in ALL_KNOWN_SUBSYSTEMS:
                        yield (option, mountpoint)
    except IOError:
        logging.exception('Cannot read /proc/mounts')
def getVerificators(self):
    """Returns the user ids of the users that verified this analysis,
    sorted in reverse order."""
    actions = ["verify", "multi_verify"]
    verifiers = [event['actor']
                 for event in wf.getReviewHistory(self)
                 if event['action'] in actions]
    # Bug fix: the previous code called sorted(...) and discarded the
    # result; sort in place so the reverse ordering takes effect.
    verifiers.sort(reverse=True)
    return verifiers
def cycle_slice(sliceable, start, end):
    """Given a list, return right hand cycle direction slice from start
    to end (both inclusive).

    Usage::

        >>> array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        >>> cycle_slice(array, 4, 7)  # from array[4] to array[7]
        [4, 5, 6, 7]
        >>> cycle_slice(array, 8, 2)  # from array[8] to array[2]
        [8, 9, 0, 1, 2]
    """
    # isinstance instead of type(...) != list: idiomatic, and also
    # accepts list subclasses without an extra copy.
    if not isinstance(sliceable, list):
        sliceable = list(sliceable)
    if end >= start:
        return sliceable[start:end + 1]
    # Wrap around the end of the list.
    return sliceable[start:] + sliceable[:end + 1]
def _generate_examples(self, archive):
    """Yields examples."""
    # Note: all files in the tar.gz are under SUN397/...
    prefix_len = len("SUN397")
    with tf.Graph().as_default():
        with utils.nogpu_session() as sess:
            for filepath, fobj in archive:
                if (filepath.endswith(".jpg")
                        and filepath not in _SUN397_IGNORE_IMAGES):
                    filename = filepath[prefix_len:]
                    # From: /c/car_interior/backseat/sun_xxx.jpg
                    # To class: /c/car_interior/backseat
                    label = "/".join(filename.split("/")[:-1])
                    image = _process_image_file(fobj, sess, filepath)
                    yield {
                        "file_name": filename,
                        "image": image,
                        "label": label,
                    }
def fetch_data_detailled_energy_use(self, start_date=None, end_date=None):
    """Get detailled energy use from a specific contract.

    Defaults to the 24 hours preceding now (HQ timezone) and stores the
    per-day raw hourly data into ``self._data[contract]``.
    """
    if start_date is None:
        start_date = (datetime.datetime.now(HQ_TIMEZONE)
                      - datetime.timedelta(days=1))
    if end_date is None:
        end_date = datetime.datetime.now(HQ_TIMEZONE)
    # Session setup and login dance.
    yield from self._get_httpsession()
    login_url = yield from self._get_login_page()
    yield from self._post_login_page(login_url)
    # Get p_p_id and contracts.
    p_p_id, contracts = yield from self._get_p_p_id_and_contract()
    # No contracts listed means there is exactly one; fetch it.
    if contracts == {}:
        contracts = yield from self._get_lonely_contract()
    for contract, contract_url in contracts.items():
        if contract_url:
            yield from self._load_contract_page(contract_url)
        data = {}
        dates = [start_date + datetime.timedelta(n)
                 for n in range(int((end_date - start_date).days))]
        for date in dates:
            # Hourly data for each day in the range.
            day_date = date.strftime("%Y-%m-%d")
            hourly_data = yield from self._get_hourly_data(day_date, p_p_id)
            data[day_date] = hourly_data['raw_hourly_data']
        self._data[contract] = data
def tree2hdf5(tree, hfile, group=None, entries=-1, show_progress=False,
              **kwargs):
    """Convert a TTree into a HDF5 table.

    Parameters
    ----------
    tree : ROOT.TTree
        A ROOT TTree.
    hfile : string or PyTables HDF5 File
        A PyTables HDF5 File handle or string path to an existing HDF5
        file.
    group : string or PyTables Group instance, optional (default=None)
        Write the table at this location in the HDF5 file.
    entries : int, optional (default=-1)
        Number of entries to read at once; by default read the entire
        TTree into memory.
    show_progress : bool, optional (default=False)
        If True, display and update a progress bar on stdout.
    kwargs : dict, optional
        Additional keyword arguments for the tree2array function.
    """
    show_progress = show_progress and check_tty(sys.stdout)
    if show_progress:
        widgets = [Percentage(), ' ', Bar(), ' ', ETA()]
    own_h5file = False
    if isinstance(hfile, string_types):
        hfile = tables_open(filename=hfile, mode="w", title="Data")
        own_h5file = True
    log.info("Converting tree '{0}' with {1:d} entries ...".format(
        tree.GetName(), tree.GetEntries()))
    if not group:
        group = hfile.root
    elif isinstance(group, string_types):
        group_where = '/' + os.path.dirname(group)
        group_name = os.path.basename(group)
        if TABLES_NEW_API:
            group = hfile.create_group(group_where, group_name,
                                       createparents=True)
        else:
            group = hfile.createGroup(group_where, group_name)
    if tree.GetName() in group:
        log.warning("Tree '{0}' already exists "
                    "in the output file".format(tree.GetName()))
        return
    total_entries = tree.GetEntries()
    pbar = None
    if show_progress and total_entries > 0:
        pbar = ProgressBar(widgets=widgets, maxval=total_entries)
    if entries <= 0:
        # Read the entire tree at once.
        if pbar is not None:
            pbar.start()
        array = tree2array(tree, **kwargs)
        array = _drop_object_col(array)
        if TABLES_NEW_API:
            table = hfile.create_table(group, tree.GetName(),
                                       array, tree.GetTitle())
        else:
            table = hfile.createTable(group, tree.GetName(),
                                      array, tree.GetTitle())
        # Flush data in the table, then all pending data.
        table.flush()
        hfile.flush()
    else:
        # Read the tree in chunks of `entries`.
        start = 0
        while start < total_entries or start == 0:
            if start > 0:
                with warnings.catch_warnings():
                    warnings.simplefilter(
                        "ignore", RootNumpyUnconvertibleWarning)
                    warnings.simplefilter(
                        "ignore", tables.NaturalNameWarning)
                    array = tree2array(tree, start=start,
                                       stop=start + entries, **kwargs)
                    array = _drop_object_col(array, warn=False)
                    table.append(array)
            else:
                # First chunk: create the table.
                array = tree2array(tree, start=start,
                                   stop=start + entries, **kwargs)
                array = _drop_object_col(array)
                if pbar is not None:
                    # Start after any output from root_numpy.
                    pbar.start()
                if TABLES_NEW_API:
                    table = hfile.create_table(group, tree.GetName(),
                                               array, tree.GetTitle())
                else:
                    table = hfile.createTable(group, tree.GetName(),
                                              array, tree.GetTitle())
            start += entries
            if start <= total_entries and pbar is not None:
                pbar.update(start)
            # Flush data in the table, then all pending data.
            table.flush()
            hfile.flush()
    if pbar is not None:
        pbar.finish()
    if own_h5file:
        hfile.close()
def convert_elementwise_add(net, node, module, builder):
    """Convert an elementwise add layer from mxnet to coreml.

    Parameters
    ----------
    net : network
        A mxnet network object.
    node : node
        Node to convert.
    module : module
        A module for MXNet.
    builder : NeuralNetworkBuilder
        A neural network builder object.
    """
    input_names, output_name = _get_input_output_name(net, node, [0, 1])
    builder.add_elementwise(node['name'], input_names, output_name, 'ADD')
def app1(self):
    """First APP1 marker in image markers."""
    for marker in self._markers:
        if marker.marker_code == JPEG_MARKER_CODE.APP1:
            return marker
    raise KeyError('no APP1 marker in image')
def ui(root_url, path):
    """Generate URL for a path in the Taskcluster ui.

    Switches on rootUrl so the same call works for both the legacy
    deployment (root URL == https://taskcluster.net, served from
    https://tools.taskcluster.net/${path}) and any future deployment
    (${rootURL}/${path}).
    """
    root_url = root_url.rstrip('/')
    path = path.lstrip('/')
    if root_url == OLD_ROOT_URL:
        return 'https://tools.taskcluster.net/{}'.format(path)
    return '{}/{}'.format(root_url, path)
def performAction(self, action):
    """Execute one action."""
    # Track elapsed steps and delegate the actual action to the base Task.
    self.t += 1
    Task.performAction(self, action)
    self.samples += 1
def _paginate(url, topkey, *args, **kwargs):
    '''Wrapper to assist with paginated responses from Digicert's REST API.

    Fixes in this revision:
    - ``(total / lim) + 1`` was a float under Python 3, which broke
      ``range()``; it also over/under-counted pages, skipping the last
      page of results.  Replaced with ceiling division and an inclusive
      range.
    - the follow-up query passed ``kwargs`` positionally instead of
      expanding it.
    '''
    ret = salt.utils.http.query(url, **kwargs)
    if 'errors' in ret['dict']:
        return ret['dict']
    lim = int(ret['dict']['page']['limit'])
    total = int(ret['dict']['page']['total'])
    if total == 0:
        return {}
    # Ceiling division without floats.
    numpages = -(-total // lim)
    # If everything fits on one page, just return the dict.
    if numpages == 1:
        return ret['dict'][topkey]
    aggregate_ret = ret['dict'][topkey]
    url = args[0]
    for p in range(2, numpages + 1):
        param_url = url + '?offset={0}'.format(lim * (p - 1))
        next_ret = salt.utils.http.query(param_url, **kwargs)
        aggregate_ret[topkey].extend(next_ret['dict'][topkey])
    return aggregate_ret
def score(self, X, y):
    """Accuracy of ``self.predict(X)`` against ``y``.

    We don't inherit from ClassifierMixin, so force use of the
    accuracy score.
    """
    from sklearn.metrics import accuracy_score
    predictions = self.predict(X)
    return accuracy_score(y, predictions)
def list_all_states(cls, **kwargs):
    """List States.

    Return a page of States.  Pass ``async=True`` (via ``**kwargs``) to
    make the HTTP request asynchronously and receive the request
    thread; call ``thread.get()`` for the result.

    :param async bool
    :param int page: page number
    :param int size: page size
    :param str sort: page order
    :return: page[State], or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths simply return the result of this
    # call (a thread in the async case, the data otherwise), so a
    # single call suffices.
    return cls._list_all_states_with_http_info(**kwargs)
def get(self, key, default=None, type_=None):
    """Return the last data value for the passed key.

    If the key doesn't exist or the value is an empty list, return
    ``default``.  When ``type_`` is given the value is converted with
    it; a failed conversion (ValueError) also yields ``default``.
    """
    try:
        rv = self[key]
    except KeyError:
        return default
    if type_ is None:
        return rv
    try:
        return type_(rv)
    except ValueError:
        return default
def task_start_time(self):
    """Return the time the task starts.

    Time is set according to iso8601.
    """
    params = self.task_start_parameters
    return datetime.time(
        params[ATTR_SMART_TASK_TRIGGER_TIME_START_HOUR],
        params[ATTR_SMART_TASK_TRIGGER_TIME_START_MIN])
def search(self, base_dn, search_filter, attributes=()):
    """Perform an AD search.

    :param str base_dn: The base DN to search within
    :param str search_filter: The search filter to apply, such as
        *objectClass=person*
    :param list attributes: Object attributes to populate, defaults to
        all
    """
    results = []
    page = 0
    # The server-side paging control's cookie is empty once the last
    # page has been retrieved.
    while page == 0 or self.sprc.cookie:
        page += 1
        # pylint: disable=no-member
        message_id = self.ldap.search_ext(
            base_dn, ldap.SCOPE_SUBTREE, search_filter, attributes,
            serverctrls=[self.sprc])
        # pylint: enable=no-member
        data, server_controls = self.ldap.result3(message_id)[1::2]
        self.sprc.cookie = server_controls[0].cookie
        logging.debug('%s - Page %s results: %s',
                      self.__class__.__name__, page,
                      ', '.join(k[0] for k in data))
        results.extend(data)
    return results
def decode_mysql_literal(text):
    """Attempts to decode given MySQL literal into Python value.

    :param text: Value to be decoded, as MySQL literal.
    :type text: str
    :return: Python version of the given MySQL literal.
    :rtype: any
    """
    # Try each literal pattern in order of specificity.
    decoders = (
        (MYSQL_NULL_PATTERN, lambda _: None),
        (MYSQL_BOOLEAN_PATTERN, lambda s: s.lower() == "true"),
        (MYSQL_FLOAT_PATTERN, float),
        (MYSQL_INT_PATTERN, int),
        (MYSQL_STRING_PATTERN, decode_mysql_string_literal),
    )
    for pattern, decoder in decoders:
        if pattern.match(text):
            return decoder(text)
    raise ValueError("Unable to decode given value: %r" % (text,))
def _adjust_rate ( self , real_wave_mfcc , algo_parameters ) :
"""RATE""" | self . log ( u"Called _adjust_rate" )
self . _apply_rate ( max_rate = algo_parameters [ 0 ] , aggressive = False ) |
def citation_director(**kwargs):
    """Direct the citation elements based on their qualifier."""
    qualifier = kwargs.get('qualifier', '')
    content = kwargs.get('content', '')
    # Qualifier -> citation element class.
    element_map = {
        'publicationTitle': CitationJournalTitle,
        'volume': CitationVolume,
        'issue': CitationIssue,
        'pageStart': CitationFirstpage,
        'pageEnd': CitationLastpage,
    }
    factory = element_map.get(qualifier)
    if factory is None:
        return None
    return factory(content=content)
def purge_duplicates(list_in):
    """Remove duplicates from list while preserving order.

    Parameters
    ----------
    list_in : Iterable

    Returns
    -------
    list
        List of first occurrences in order
    """
    seen = set()  # O(1) membership test for hashable items
    result = []
    for item in list_in:
        try:
            is_new = item not in seen
            if is_new:
                seen.add(item)
        except TypeError:
            # Unhashable item: fall back to a linear scan of the output.
            is_new = item not in result
        if is_new:
            result.append(item)
    return result
def display_image_file(fn, width='auto', height='auto', preserve_aspect_ratio=None):
    """Display an image in the terminal.  A newline is not printed.

    width and height are strings, following the format
        N: N character cells.
        Npx: N pixels.
        N%: N percent of the session's width or height.
        'auto': the image's inherent size determines the dimension.

    preserve_aspect_ratio sets whether the aspect ratio of the image is
    preserved.  The default (None) is True unless both width and height
    are set.

    See https://www.iterm2.com/documentation-images.html
    """
    real_path = os.path.realpath(os.path.expanduser(fn))
    with open(real_path, 'rb') as f:
        sys.stdout.buffer.write(
            image_bytes(f.read(), filename=fn, width=width, height=height,
                        preserve_aspect_ratio=preserve_aspect_ratio))
def set_pwm(self, values):
    """Set pwm values on the controlled pins.

    :param values: Values to set (0.0-1.0), one per controlled pin.
    :raises ValueError: on length mismatch or out-of-range values.
    :return:
    """
    if len(values) != len(self._pins):
        raise ValueError('Number of values has to be identical with '
                         'the number of pins.')
    if not all(0 <= v <= 1 for v in values):
        raise ValueError('Values must be between 0 and 1.')
    # Retry transient bus errors; re-raise on the final attempt.
    for attempt in range(self.IO_TRIES):
        try:
            self._set_pwm(self._to_raw_pwm(values))
            break
        except IOError as error:
            if attempt == self.IO_TRIES - 1:
                raise error
    self._state = values
def cumulative_value(self, slip_moment, mmax, mag_value, bbar, dbar):
    '''Returns the rate of events with M > mag_value.

    :param float slip_moment:
        Product of slip (cm/yr) * Area (cm^2) * shear_modulus (dyne-cm)
    :param float mmax:
        Maximum magnitude
    :param float mag_value:
        Magnitude value
    :param float bbar:
        \\bar{b} parameter (effectively = b * log(10.))
    :param float dbar:
        \\bar{d} parameter
    '''
    delta_m = mmax - mag_value
    a_1 = self._get_a1(bbar, dbar, slip_moment, mmax)
    # The (delta_m > 0.0) factor zeroes the rate above mmax.
    return a_1 * np.exp(bbar * delta_m) * (delta_m > 0.0)
def parse_time(t):
    """Parse string time format to microsecond.

    Accepts either an integer (returned unchanged) or a string such as
    ``'5s'``, ``'2m'`` or ``'1h'`` matching ``re_time``; the numeric part
    is scaled by the unit multiplier. Raises ``TimeFormatError`` for
    anything else.
    """
    if isinstance(t, (str, unicode)):
        match = re_time.match(t)
        if not match:
            raise TimeFormatError(t)
        amount, unit = int(match.group(1)), match.group(2)
        # Unknown units fall through with a multiplier of 1.
        factors = {'s': 1000, 'm': 60 * 1000, 'h': 60 * 60 * 1000}
        return amount * factors.get(unit, 1)
    if isinstance(t, (int, long)):
        return t
    raise TimeFormatError(t)
def Poisson(mu: vertex_constructor_param_types, label: Optional[str] = None) -> Vertex:
    """One to one constructor for mapping some shape of mu to
    a matching shaped Poisson.

    :param mu: mu with same shape as desired Poisson tensor or scalar
    :param label: optional label attached to the resulting vertex
    """
    # Delegates to the JVM-backed PoissonVertex; mu is coerced to a double vertex first.
    return Integer(context.jvm_view().PoissonVertex, label, cast_to_double_vertex(mu))
def validate_unwrap(self, value):
    '''Expects a list of dictionaries with ``k`` and ``v`` set to the
    keys and values that will be unwrapped into the output python
    dictionary should have.

    :param value: candidate wrapped value; must be a list of dicts.
    :return: True when validation succeeds (failures are reported via
        the ``_fail_validation*`` helpers, which raise).
    '''
    if not isinstance(value, list):
        self._fail_validation_type(value, list)
    for value_dict in value:
        if not isinstance(value_dict, dict):
            cause = BadValueException('', value_dict, 'Values in a KVField list must be dicts')
            self._fail_validation(value, 'Values in a KVField list must be dicts', cause=cause)
        k = value_dict.get('k')
        v = value_dict.get('v')
        if k is None:
            # Keys are mandatory; values may legitimately be absent/None.
            self._fail_validation(value, 'Value had None for a key')
        try:
            self.key_type.validate_unwrap(k)
        except BadValueException as bve:
            self._fail_validation(value, 'Bad value for KVField key %s' % k, cause=bve)
        try:
            self.value_type.validate_unwrap(v)
        except BadValueException as bve:
            # Bug fix: message previously contained the typo "KFVield".
            self._fail_validation(value, 'Bad value for KVField value %s' % k, cause=bve)
    return True
def email_list_to_email_dict(email_list):
    """Convert a list of email to a dict of email.

    Each entry is parsed with ``email.utils.parseaddr``; the dict maps the
    address to the real name when both are present, otherwise to the
    address itself. ``None`` input yields an empty dict.
    """
    if email_list is None:
        return {}
    parsed = (email.utils.parseaddr(entry) for entry in email_list)
    return {address: (realname if realname and address else address)
            for realname, address in parsed}
def postorder(self):
    """Return the nodes in the binary tree using post-order traversal.

    A post-order traversal visits the left subtree, the right subtree,
    then the root.

    .. _post-order: https://en.wikipedia.org/wiki/Tree_traversal

    :return: List of nodes.
    :rtype: [binarytree.Node]
    """
    visited = []

    def _walk(node):
        # Recursion depth equals tree height.
        if node is None:
            return
        _walk(node.left)
        _walk(node.right)
        visited.append(node)

    _walk(self)
    return visited
async def get(self, *, encoding=None, decoder=None):
    """Wait for and return pub/sub message from one of channels.

    Return value is either:

    * tuple of two elements: channel & message;
    * tuple of three elements: pattern channel, (target channel & message);
    * or None in case Receiver is not active or has just been stopped.

    :param encoding: optional charset; the raw message bytes are decoded
        with it before any ``decoder`` is applied.
    :param decoder: optional callable applied to the (possibly decoded)
        message payload.
    :raises aioredis.ChannelClosedError: If listener is stopped
        and all messages have been received.
    """
    # TODO: add note about raised exception and end marker.
    # Flow before ClosableQueue:
    # - ch.get() -> message
    # - ch.close() -> ch.put(None)
    # - ch.get() -> None
    # - ch.get() -> ChannelClosedError
    # Current flow:
    # - ch.get() -> message
    # - ch.close() -> ch._closed = True
    # - ch.get() -> ChannelClosedError
    assert decoder is None or callable(decoder), decoder
    if self._queue.exhausted:
        # Queue closed and fully drained: no more messages will ever arrive.
        raise ChannelClosedError()
    obj = await self._queue.get()
    if obj is EndOfStream:
        # Receiver stopped; implicit None signals "no message".
        return
    ch, msg = obj
    if ch.is_pattern:
        # Pattern subscriptions deliver (destination channel, payload) pairs.
        dest_ch, msg = msg
    if encoding is not None:
        msg = msg.decode(encoding)
    if decoder is not None:
        msg = decoder(msg)
    if ch.is_pattern:
        return ch, (dest_ch, msg)
    return ch, msg
def copy_headers(self, source_databox):
    """Copy every header entry of ``source_databox`` into this databox.

    :param source_databox: databox whose ``hkeys``/``h()`` pairs are copied.
    :return: self, to allow chaining.
    """
    for header_key in source_databox.hkeys:
        header_value = source_databox.h(header_key)
        self.insert_header(header_key, header_value)
    return self
def _on_msg(self, msg):
    """Handle messages from the front-end.

    :param msg: comm message dict; the payload lives in msg['content']['data'].
    """
    data = msg['content']['data']
    # If the message is a call invoke, run the function and send
    # the results.
    if 'callback' in data:
        guid = data['callback']
        # Look up the previously registered Python callable by its GUID.
        callback = callback_registry[guid]
        args = data['arguments']
        args = [self.deserialize(a) for a in args]
        index = data['index']
        results = callback(*args)
        return self.serialize(self._send('return', index=index, results=results))
    # The message is not a call invoke, it must be an object
    # that is a response to a Python request.
    else:
        index = data['index']
        immutable = data['immutable']
        value = data['value']
        if index in self._callbacks:
            # Resolve the pending promise, then drop it from the registry.
            self._callbacks[index].resolve({'immutable': immutable, 'value': value})
            del self._callbacks[index]
def _to_dict ( self ) :
"""Return a json dictionary representing this model .""" | _dict = { }
if hasattr ( self , 'duration' ) and self . duration is not None :
_dict [ 'duration' ] = self . duration
if hasattr ( self , 'name' ) and self . name is not None :
_dict [ 'name' ] = self . name
if hasattr ( self , 'details' ) and self . details is not None :
_dict [ 'details' ] = self . details . _to_dict ( )
if hasattr ( self , 'status' ) and self . status is not None :
_dict [ 'status' ] = self . status
return _dict |
def _handle_offset_response(self, response):
    """Handle responses to both OffsetRequest and OffsetFetchRequest, since
    they are similar enough.

    :param response:
        A tuple of a single OffsetFetchResponse or OffsetResponse
    """
    # Got a response, clear our outstanding request deferred
    self._request_d = None
    # Successful request, reset our retry delay, count, etc
    self.retry_delay = self.retry_init_delay
    self._fetch_attempt_count = 1
    response = response[0]
    if hasattr(response, 'offsets'):
        # It's a response to an OffsetRequest
        self._fetch_offset = response.offsets[0]
    else:
        # It's a response to an OffsetFetchRequest
        # Make sure we got a valid offset back. Kafka uses -1 to indicate
        # no committed offset was retrieved
        if response.offset == OFFSET_NOT_COMMITTED:
            self._fetch_offset = OFFSET_EARLIEST
        else:
            # Resume from the message after the last committed offset.
            self._fetch_offset = response.offset + 1
            self._last_committed_offset = response.offset
    self._do_fetch()
def removc(item, inset):
    """Remove an item from a character set.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/removc_c.html

    :param item: Item to be removed.
    :type item: str
    :param inset: Set to be updated.
    :type inset: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(inset, stypes.SpiceCell)
    # dtype 0 marks a character cell; removc_c only operates on those.
    assert inset.dtype == 0
    item = stypes.stringToCharP(item)
    # Mutates inset in place via the CSPICE library call.
    libspice.removc_c(item, ctypes.byref(inset))
def service(self):
    """Service the root socket.

    Read from the root socket and forward one datagram to a
    connection. The call will return without forwarding data
    if any of the following occurs:

    * An error is encountered while reading from the root socket
    * Reading from the root socket times out
    * The root socket is non-blocking and has no data available
    * An empty payload is received
    * A non-empty payload is received from an unknown peer (a peer
      for which get_connection has not yet been called); in this case,
      the payload is held by this instance and will be forwarded when
      the forward method is called

    Return:
        if the datagram received was from a new peer, then the peer's
        address; otherwise None
    """
    self.payload, self.payload_peer_address = self.datagram_socket.recvfrom(UDP_MAX_DGRAM_LENGTH)
    _logger.debug("Received datagram from peer: %s", self.payload_peer_address)
    if not self.payload:
        self.payload_peer_address = None
        return
    # Bug fix: dict.has_key() was removed in Python 3; the "in" operator
    # is the equivalent (and also works on Python 2).
    if self.payload_peer_address in self.connections:
        self.forward()
    else:
        return self.payload_peer_address
def create_permissions_from_tuples(model, codename_tpls):
    """Creates custom permissions on model "model".

    :param model: "app_label.modelname" string resolved via django_apps.get_model.
    :param codename_tpls: iterable of codename tuples; each is expanded by
        get_from_codename_tuple into (app_label, codename, name).
    """
    if codename_tpls:
        model_cls = django_apps.get_model(model)
        content_type = ContentType.objects.get_for_model(model_cls)
        for codename_tpl in codename_tpls:
            app_label, codename, name = get_from_codename_tuple(codename_tpl, model_cls._meta.app_label)
            try:
                Permission.objects.get(codename=codename, content_type=content_type)
            except ObjectDoesNotExist:
                # Only create when missing so repeated runs stay idempotent.
                Permission.objects.create(name=name, codename=codename, content_type=content_type)
            verify_codename_exists(f"{app_label}.{codename}")
def temp(dev, target):
    """Gets or sets the target temperature.

    Always echoes the current target; additionally sets a new one when
    ``target`` is truthy.
    """
    click.echo("Current target temp: %s" % dev.target_temperature)
    if not target:
        return
    click.echo("Setting target temp: %s" % target)
    dev.target_temperature = target
def protect(self, password=None, read_protect=False, protect_from=0):
    """Set password protection or permanent lock bits.

    With *password* None, all memory pages are protected by setting the
    relevant lock bits (lock bits can not be reset), and a valid NDEF
    write flag is set to read-only. Otherwise the first 4 bytes of
    *password* are written to the tag's PWD memory bytes and the next
    2 bytes to the PACK bytes (factory defaults are used for an empty
    string); lock bits are not set for password protection.

    *read_protect* and *protect_from* are only evaluated together with a
    password: *read_protect* makes reads require password verification
    too (PROT bit), and *protect_from* selects the first protected
    4-byte page (minimum effective value is page 3).
    """
    # All of the work happens in the NTAG21x base implementation;
    # forward the arguments positionally.
    return super(NTAG21x, self).protect(password, read_protect, protect_from)
def set_head_middle_tail(self, head_length=None, middle_length=None, tail_length=None):
    """Set the HEAD, MIDDLE, TAIL explicitly.

    If a parameter is ``None``, it will be ignored.
    If both ``middle_length`` and ``tail_length`` are specified,
    only ``middle_length`` will be applied.

    :param head_length: the length of HEAD, in seconds
    :type  head_length: :class:`~aeneas.exacttiming.TimeValue`
    :param middle_length: the length of MIDDLE, in seconds
    :type  middle_length: :class:`~aeneas.exacttiming.TimeValue`
    :param tail_length: the length of TAIL, in seconds
    :type  tail_length: :class:`~aeneas.exacttiming.TimeValue`
    :raises: TypeError: if one of the arguments is not ``None``
        or :class:`~aeneas.exacttiming.TimeValue`
    :raises: ValueError: if one of the arguments is greater
        than the length of the audio file
    """
    # Validate every provided argument before mutating any state.
    for variable, name in [(head_length, "head_length"), (middle_length, "middle_length"), (tail_length, "tail_length")]:
        if (variable is not None) and (not isinstance(variable, TimeValue)):
            raise TypeError(u"%s is not None or TimeValue" % name)
        if (variable is not None) and (variable > self.audio_length):
            raise ValueError(u"%s is greater than the length of the audio file" % name)
    self.log(u"Setting head middle tail...")
    # self.rconf.mws is the time step used below to convert second
    # lengths into frame indices (division by mws).
    mws = self.rconf.mws
    self.log([u"Before: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    if head_length is not None:
        self.middle_begin = int(head_length / mws)
    if middle_length is not None:
        self.middle_end = self.middle_begin + int(middle_length / mws)
    elif tail_length is not None:
        # TAIL is measured backwards from the end of the audio.
        self.middle_end = self.all_length - int(tail_length / mws)
    self.log([u"After: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
    self.log(u"Setting head middle tail... done")
def get_size(self, bucket: str, key: str) -> int:
    """Retrieves the filesize.

    :param bucket: the bucket the object resides in.
    :param key: the key of the object for which size is being retrieved.
    :return: integer equal to filesize in bytes
    """
    return self._get_blob_obj(bucket, key).size
def search(self, dstpath, **params):
    """For compatibility with generic image catalog search.

    :param dstpath: unused here (kept for interface compatibility).
    :param params: expects 'ra', 'dec' (degrees or sexagesimal strings)
        and 'width'/'height' in arcminutes.
    :return: path to the downloaded FITS file, or None if nothing found.
    """
    self.logger.debug("search params=%s" % (str(params)))
    ra, dec = params['ra'], params['dec']
    if not (':' in ra):
        # Assume RA and DEC are in degrees
        ra_deg = float(ra)
        dec_deg = float(dec)
    else:
        # Assume RA and DEC are in standard string notation
        ra_deg = wcs.hmsStrToDeg(ra)
        dec_deg = wcs.dmsStrToDeg(dec)
    # Convert width/height (arcminutes) to degrees for the search
    wd_deg = float(params['width']) / 60.0
    ht_deg = float(params['height']) / 60.0
    # initialize our query object with the service's base URL
    query = pyvo.sia.SIAQuery(self.url)
    query.ra = ra_deg
    query.dec = dec_deg
    query.size = (wd_deg, ht_deg)
    query.format = 'image/fits'
    self.logger.info("Will query: %s" % query.getqueryurl(True))
    results = query.execute()
    if len(results) > 0:
        self.logger.info("Found %d images" % len(results))
    else:
        # Bug fix: the original used "..." % len(results) on a format
        # string with no placeholder, which raises TypeError at runtime.
        self.logger.warning("Found no images in this area")
        return None
    # For now, we pick the first one found
    # REQUIRES FIX IN PYVO:
    #   imfile = results[0].cachedataset(dir="/tmp")
    # Workaround:
    fitspath = results[0].make_dataset_filename(dir="/tmp")
    results[0].cachedataset(fitspath)
    # explicit return
    return fitspath
def result(self):
    """Return the value at an address, optionally waiting until it is
    set from the context_manager, or set based on the pre-fetch mechanism.

    Returns:
        (bytes): The opaque value for an address.
    """
    if self._read_only:
        # Read-only handles never block; the value is already final.
        return self._result
    with self._condition:
        if self._wait_for_tree and not self._result_set_in_context:
            # Block until either the tree or the context sets the value.
            self._condition.wait_for(lambda: self._tree_has_set or self._result_set_in_context)
        return self._result
def has_table(table_name, con, schema=None):
    """Check if DataBase has named table.

    Parameters
    ----------
    table_name : string
        Name of SQL table.
    con : SQLAlchemy connectable (engine/connection) or sqlite3 DBAPI2 connection
        Using SQLAlchemy makes it possible to use any DB supported by that
        library. If a DBAPI2 object, only sqlite3 is supported.
    schema : string, default None
        Name of SQL schema in database to write to (if database flavor
        supports this). If None, use default schema (default).

    Returns
    -------
    boolean
    """
    return pandasSQL_builder(con, schema=schema).has_table(table_name)
def is_group_name_exists(self, group_name):
    """Return True if a group with the given name already exists."""
    return any(g["group_name"] == group_name for g in self.m["groups"])
def cleanup(self):
    """Do the final clean up before shutting down.

    Persists the command prompt, sender and recipient histories, each
    truncated to the configured 'history_size'.
    """
    size = settings.get('history_size')
    histories = (
        (self.commandprompthistory, self._cmd_hist_file),
        (self.senderhistory, self._sender_hist_file),
        (self.recipienthistory, self._recipients_hist_file),
    )
    for history, path in histories:
        self._save_history_to_file(history, path, size=size)
def get_history(self, exp, rep, tags):
    """Return the whole history for one experiment and one repetition.

    tags can be a string or a list of strings. If tags is a string,
    the history is returned as a list of values; if tags is a list of
    strings or 'all', history is returned as a dictionary of lists
    of values.

    :param exp: experiment directory (log file is ``<rep>.log`` inside it)
    :param rep: repetition number
    :param tags: a tag name, a list of tag names, or 'all'
    :raises SystemExit: if the experiment is unknown
    """
    params = self.get_params(exp)
    if params is None:
        raise SystemExit('experiment %s not found.' % exp)
    # Normalize to a list of tags. Strings are iterable on Python 3, so
    # they must be detected explicitly in addition to non-iterables.
    if tags != 'all' and (isinstance(tags, str) or not hasattr(tags, '__iter__')):
        tags = [tags]
    results = {}
    logfile = os.path.join(exp, '%i.log' % rep)
    try:
        f = open(logfile)
    except IOError:
        # Missing log: empty result of the appropriate shape.
        return [] if len(tags) == 1 else {}
    # Close the file deterministically (the original leaked the handle on
    # any exception while reading).
    with f:
        for line in f:
            dic = json.loads(line)
            for tag in tags:
                if tag not in results:
                    results[tag] = []
                # None marks log lines that do not carry this tag.
                results[tag].append(dic.get(tag))
    if len(results) == 0:
        return [] if len(tags) == 1 else {}
    if len(tags) == 1:
        # Fixed: results.keys()[0] is not subscriptable on Python 3.
        return results[tags[0]]
    return results
def write_head(self, title, css_path, default_css):
    """Writes the head part for the generated document,
    with the given title and CSS.

    :param title: document title, substituted into the <title> element.
    :param css_path: directory holding the docs.*.css files; made relative
        to the current page via self._rel().
    :param default_css: theme name used when localStorage has no "theme" entry.
    """
    self.title = title
    # The inline <script> swaps the stylesheet href to the persisted theme
    # (or default_css) before the page renders.
    self.write('''<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<title>{title}</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<link id="style" href="{rel_css}/docs.dark.css" rel="stylesheet">
<script>
document.getElementById("style").href = "{rel_css}/docs."
    + (localStorage.getItem("theme") || "{def_css}")
    + ".css";
</script>
<link href="https://fonts.googleapis.com/css?family=Nunito|Source+Code+Pro"
      rel="stylesheet">
</head>
<body>
<div id="main_div">''', title=title, rel_css=self._rel(css_path), def_css=default_css)
def compute_taxes(self, precision=None):
    '''Returns the total amount of taxes for this group.

    @param precision: int Number of decimal places
    @return: Decimal
    '''
    return sum(group.compute_taxes(precision) for group in self.__groups)
def psiblast_n_neighbors(seqs, n=100, blast_db=None, core_threshold=1e-50,
                         extra_threshold=1e-10, lower_threshold=1e-6, step=100,
                         method="two-step", blast_mat_root=None, params={},
                         add_seq_names=False, WorkingDir=None, SuppressStderr=None,
                         SuppressStdout=None, input_handler=None,
                         scorer=3,  # shotgun with 3 hits needed to keep
                         second_db=None):
    """PsiBlasts sequences, stopping when n neighbors are reached.

    core_threshold: threshold for the core profile (default: 1e-50)
    extra_threshold: threshold for pulling in additional seqs (default: 1e-10)
    lower_threshold: threshold for seqs in final round (default: 1e-6)
    seqs: either file name or list of sequence objects or list of strings or
        single multiline string containing sequences.
        If you want to skip the detection and force a specific type of input
        handler, use input_handler='your_favorite_handler'.
    add_seq_names: boolean. if True, sequence names are inserted in the list
        of sequences. if False, it assumes seqs is a list of lines of some
        proper format that the program can handle

    NOTE(review): ``params`` is a mutable default and is intentionally
    mutated (callers must supply '-j'); it is restored before returning.
    """
    if blast_db:
        params["-d"] = blast_db
    ih = input_handler or guess_input_handler(seqs, add_seq_names)
    recs = seqs_to_stream(seqs, ih)
    # checkpointing can only handle one seq...
    # set up the parameters for the core and additional runs
    max_iterations = params['-j']
    params['-j'] = 2  # won't checkpoint with single iteration
    app = PsiBlast(params=params, blast_mat_root=blast_mat_root,
                   InputHandler='_input_as_lines', WorkingDir=WorkingDir,
                   SuppressStderr=SuppressStderr, SuppressStdout=SuppressStdout, )
    result = {}
    for seq in recs:
        query_id = seq[0][1:].split(None, 1)[0]
        if method == "two-step":
            result[query_id] = ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold, extra_threshold, lower_threshold, second_db)
        elif method == "lower_threshold":
            result[query_id] = ids_from_seq_lower_threshold(seq, n, max_iterations, app, core_threshold, lower_threshold, step)
        elif method == "iterative":
            result[query_id] = ids_from_seqs_iterative(seq, app, QMEPsiBlast9, scorer, params['-j'], n)
        else:
            # Bug fix: the original used the Python 2-only statement form
            # "raise TypeError, msg", a SyntaxError on Python 3.
            raise TypeError("Got unknown method %s" % method)
    # Restore the caller-visible iteration count.
    params['-j'] = max_iterations
    return result
def _monitoring_resource_types_list(args, _):
    """Lists the resource descriptors in the project.

    :param args: parsed magic arguments; reads 'project' and optional 'type'.
    :param _: unused (required by the magic-handler signature).
    """
    project_id = args['project']
    # Fall back to matching every resource type when no pattern was given.
    pattern = args['type'] or '*'
    descriptors = gcm.ResourceDescriptors(context=_make_context(project_id))
    dataframe = descriptors.as_dataframe(pattern=pattern)
    return _render_dataframe(dataframe)
def do_version():
    """Return version details of the running server api."""
    # Assemble server name, API version and container version into the
    # Version model declared on the ping API.
    v = ApiPool.ping.model.Version(name=ApiPool().current_server_name, version=ApiPool().current_server_api.get_version(), container=get_container_version(), )
    log.info("/version: " + pprint.pformat(v))
    return v
def create(cls, pid, idle_ttl=DEFAULT_IDLE_TTL, max_size=DEFAULT_MAX_SIZE, time_method=None):
    """Create a new pool, with the ability to pass in values to override
    the default idle TTL and the default maximum size.

    A pool's idle TTL defines the amount of time that a pool can be open
    without any sessions before it is removed.

    A pool's max size defines the maximum number of connections that can
    be added to the pool to prevent unbounded open connections.

    :param str pid: The pool ID
    :param int idle_ttl: Time in seconds for the idle TTL
    :param int max_size: The maximum pool size
    :param callable time_method: Override the use of :py:meth:`time.time`
        method for time values.
    :raises: KeyError
    """
    with cls._lock:
        # Bug fix: the existence check was performed outside the lock, so
        # two concurrent create() calls for the same pid could both pass
        # it (TOCTOU race). Checking inside the lock makes it atomic.
        if pid in cls._pools:
            raise KeyError('Pool %s already exists' % pid)
        LOGGER.debug("Creating Pool: %s (%i/%i)", pid, idle_ttl, max_size)
        cls._pools[pid] = Pool(pid, idle_ttl, max_size, time_method)
def register_model(cls, model):
    """Register a model class according to its remote name.

    Args:
        model: the model to register
    """
    rest_name = model.rest_name
    resource_name = model.resource_name
    if rest_name not in cls._model_rest_name_registry:
        # First model for this rest name: seed both registry buckets.
        cls._model_rest_name_registry[rest_name] = [model]
        cls._model_resource_name_registry[resource_name] = [model]
    elif model not in cls._model_rest_name_registry[rest_name]:
        # Additional model sharing the rest name: keep both registries in sync.
        cls._model_rest_name_registry[rest_name].append(model)
        cls._model_resource_name_registry[resource_name].append(model)
def set_children_padding(widget, types, height=None, width=None):
    """Sets given Widget children padding.

    :param widget: Widget to sets the children padding.
    :type widget: QWidget
    :param types: Children types.
    :type types: tuple or list
    :param height: Height padding.
    :type height: int
    :param width: Width padding.
    :type width: int
    :return: Definition success.
    :rtype: bool
    """
    # Padding is applied symmetrically, hence the doubling.
    extra_height = (height if height is not None else 0) * 2
    extra_width = (width if width is not None else 0) * 2
    for child_type in types:
        for child in widget.findChildren(child_type):
            metrics = child.fontMetrics()
            child.setStyleSheet("{0}{{height: {1}px; width: {2}px;}}".format(
                child_type.__name__,
                metrics.height() + extra_height,
                metrics.width(child.text()) + extra_width))
    return True
def start(self):
    """Find the first data entry and prepare to parse."""
    # Advance past any leading tags until the first start-of-entry marker.
    while not self.is_start(self.current_tag):
        self.next()
    self.new_entry()
def key(state, host, key=None, keyserver=None, keyid=None):
    '''Add apt gpg keys with ``apt-key``.

    + key: filename or URL
    + keyserver: URL of keyserver to fetch key from
    + keyid: key identifier when using keyserver

    Note:
        Always returns an add command, not state checking.

    keyserver/id:
        These must be provided together.

    Yields shell command strings to be executed on the target host.
    '''
    if key:
        # If URL, wget the key to stdout and pipe into apt-key, because the "adv"
        # apt-key passes to gpg which doesn't always support https!
        if urlparse(key).scheme:
            yield 'wget -O- {0} | apt-key add -'.format(key)
        else:
            # Plain filename already present on the host.
            yield 'apt-key add {0}'.format(key)
    if keyserver and keyid:
        yield 'apt-key adv --keyserver {0} --recv-keys {1}'.format(keyserver, keyid)
def connect(self, pin=None):
    """Opens the port and initializes the modem and SIM card.

    :param pin: The SIM card PIN code, if any
    :type pin: str
    :raise PinRequiredError: if the SIM card requires a PIN but none was provided
    :raise IncorrectPinError: if the specified PIN is incorrect
    """
    self.log.info('Connecting to modem on port %s at %dbps', self.port, self.baudrate)
    super(GsmModem, self).connect()
    # Send some initialization commands to the modem
    try:
        self.write('ATZ')  # reset configuration
    except CommandError:
        # Some modems require a SIM PIN at this stage already; unlock it now.
        # Attempt to enable detailed error messages (to catch incorrect PIN error)
        # but ignore if it fails
        self.write('AT+CMEE=1', parseError=False)
        self._unlockSim(pin)
        pinCheckComplete = True
        self.write('ATZ')  # reset configuration
    else:
        pinCheckComplete = False
    self.write('ATE0')  # echo off
    try:
        cfun = int(lineStartingWith('+CFUN:', self.write('AT+CFUN?'))[7:])  # example response: +CFUN: 1
        if cfun != 1:
            # Enable full modem functionality.
            self.write('AT+CFUN=1')
    except CommandError:
        pass  # just ignore if the +CFUN command isn't supported
    self.write('AT+CMEE=1')  # enable detailed error messages (even if already set - ATZ may reset this)
    if not pinCheckComplete:
        self._unlockSim(pin)
    # Get list of supported commands from modem
    commands = self.supportedCommands
    # Device-specific settings; hint values: 0 unknown, 1 Huawei, 2 Wavecom, 3 ZTE
    callUpdateTableHint = 0  # unknown modem
    enableWind = False
    if commands != None:
        if '^CVOICE' in commands:
            self.write('AT^CVOICE=0', parseError=False)  # Enable voice calls
        if '+VTS' in commands:
            # Check for DTMF sending support
            Call.dtmfSupport = True
        elif '^DTMF' in commands:
            # Huawei modems use ^DTMF to send DTMF tones
            callUpdateTableHint = 1  # Huawei
        if '^USSDMODE' in commands:
            # Enable Huawei text-mode USSD
            self.write('AT^USSDMODE=0', parseError=False)
        if '+WIND' in commands:
            callUpdateTableHint = 2  # Wavecom
            enableWind = True
        elif '+ZPAS' in commands:
            callUpdateTableHint = 3  # ZTE
    else:
        # Try to enable general notifications on Wavecom-like device
        enableWind = True
    if enableWind:
        try:
            wind = lineStartingWith('+WIND:', self.write('AT+WIND?'))  # Check current WIND value; example response: +WIND: 63
        except CommandError:
            # Modem does not support +WIND notifications. See if we can detect other known call update notifications
            pass
        else:
            # Enable notifications for call setup, hangup, etc
            if int(wind[7:]) != 50:
                self.write('AT+WIND=50')
            callUpdateTableHint = 2  # Wavecom
    # Attempt to identify modem type directly (if not already) - for outgoing call status updates
    if callUpdateTableHint == 0:
        if self.manufacturer.lower() == 'huawei':
            callUpdateTableHint = 1  # huawei
        else:
            # See if this is a ZTE modem that has not yet been identified based on supported commands
            try:
                self.write('AT+ZPAS?')
            except CommandError:
                pass  # Not a ZTE modem
            else:
                callUpdateTableHint = 3  # ZTE
    # Load outgoing call status updates based on identified modem features
    if callUpdateTableHint == 1:
        # Use Huawei's ^NOTIFICATIONs
        self.log.info('Loading Huawei call state update table')
        self._callStatusUpdates = ((re.compile(r'^\^ORIG:(\d),(\d)$'), self._handleCallInitiated), (re.compile(r'^\^CONN:(\d),(\d)$'), self._handleCallAnswered), (re.compile(r'^\^CEND:(\d),(\d),(\d)+,(\d)+$'), self._handleCallEnded))
        self._mustPollCallStatus = False
        # Huawei modems use ^DTMF to send DTMF tones; use that instead
        Call.DTMF_COMMAND_BASE = '^DTMF={cid},'
        Call.dtmfSupport = True
    elif callUpdateTableHint == 2:
        # Wavecom modem: +WIND notifications supported
        self.log.info('Loading Wavecom call state update table')
        self._callStatusUpdates = ((re.compile(r'^\+WIND: 5,(\d)$'), self._handleCallInitiated), (re.compile(r'^OK$'), self._handleCallAnswered), (re.compile(r'^\+WIND: 6,(\d)$'), self._handleCallEnded))
        self._waitForAtdResponse = False  # Wavecom modems return OK only when the call is answered
        self._mustPollCallStatus = False
        if commands == None:
            # older modem, assume it has standard DTMF support
            Call.dtmfSupport = True
    elif callUpdateTableHint == 3:
        # ZTE
        # Use ZTE notifications ("CONNECT"/"HANGUP", but no "call initiated" notification)
        self.log.info('Loading ZTE call state update table')
        self._callStatusUpdates = ((re.compile(r'^CONNECT$'), self._handleCallAnswered), (re.compile(r'^HANGUP:\s*(\d+)$'), self._handleCallEnded), (re.compile(r'^OK$'), self._handleCallRejected))
        self._waitForAtdResponse = False  # ZTE modems do not return an immediate OK only when the call is answered
        self._mustPollCallStatus = False
        self._waitForCallInitUpdate = False  # ZTE modems do not provide "call initiated" updates
        if commands == None:
            # ZTE uses standard +VTS for DTMF
            Call.dtmfSupport = True
    else:
        # Unknown modem - we do not know what its call updates look like. Use polling instead
        self.log.info('Unknown/generic modem type - will use polling for call state updates')
        self._mustPollCallStatus = True
        self._pollCallStatusRegex = re.compile('^\+CLCC:\s+(\d+),(\d),(\d),(\d),([^,]),"([^,]*)",(\d+)$')
        self._waitForAtdResponse = True  # Most modems return OK immediately after issuing ATD
    # General meta-information setup
    self.write('AT+COPS=3,0', parseError=False)  # Use long alphanumeric name format
    # SMS setup
    self.write('AT+CMGF={0}'.format(1 if self._smsTextMode else 0))  # Switch to text or PDU mode for SMS messages
    self._compileSmsRegexes()
    if self._smscNumber != None:
        self.write('AT+CSCA="{0}"'.format(self._smscNumber))  # Set default SMSC number
        currentSmscNumber = self._smscNumber
    else:
        currentSmscNumber = self.smsc
    # Some modems delete the SMSC number when setting text-mode SMS parameters; preserve it if needed
    if currentSmscNumber != None:
        self._smscNumber = None  # clear cache
    self.write('AT+CSMP=49,167,0,0', parseError=False)  # Enable delivery reports
    # ...check SMSC again to ensure it did not change
    if currentSmscNumber != None and self.smsc != currentSmscNumber:
        self.smsc = currentSmscNumber
    # Set message storage, but first check what the modem supports - example response: +CPMS: (("SM","BM","SR"),("SM"))
    try:
        cpmsLine = lineStartingWith('+CPMS', self.write('AT+CPMS=?'))
    except CommandError:
        # Modem does not support AT+CPMS; SMS reading unavailable
        self._smsReadSupported = False
        self.log.warning('SMS preferred message storage query not supported by modem. SMS reading unavailable.')
    else:
        cpmsSupport = cpmsLine.split(' ', 1)[1].split('),(')
        # Do a sanity check on the memory types returned - Nokia S60 devices return empty strings, for example
        for memItem in cpmsSupport:
            if len(memItem) == 0:
                # No support for reading stored SMS via AT commands - probably a Nokia S60
                self._smsReadSupported = False
                self.log.warning('Invalid SMS message storage support returned by modem. SMS reading unavailable. Response was: "%s"', cpmsLine)
                break
        else:
            # Supported memory types look fine, continue
            preferredMemoryTypes = ('"ME"', '"SM"', '"SR"')
            cpmsItems = [''] * len(cpmsSupport)
            for i in xrange(len(cpmsSupport)):
                for memType in preferredMemoryTypes:
                    if memType in cpmsSupport[i]:
                        if i == 0:
                            # First slot also controls which store reads/deletes use.
                            self._smsMemReadDelete = memType
                        cpmsItems[i] = memType
                        break
            self.write('AT+CPMS={0}'.format(','.join(cpmsItems)))  # Set message storage
        del cpmsSupport
        del cpmsLine
    if self._smsReadSupported:
        try:
            self.write('AT+CNMI=2,1,0,2')  # Set message notifications
        except CommandError:
            # Message notifications not supported
            self._smsReadSupported = False
            self.log.warning('Incoming SMS notifications not supported by modem. SMS receiving unavailable.')
    # Incoming call notification setup
    try:
        self.write('AT+CLIP=1')  # Enable calling line identification presentation
    except CommandError as clipError:
        self._callingLineIdentification = False
        self.log.warning('Incoming call calling line identification (caller ID) not supported by modem. Error: {0}'.format(clipError))
    else:
        self._callingLineIdentification = True
        try:
            self.write('AT+CRC=1')  # Enable extended format of incoming indication (optional)
        except CommandError as crcError:
            self._extendedIncomingCallIndication = False
            self.log.warning('Extended format incoming call indication not supported by modem. Error: {0}'.format(crcError))
        else:
            self._extendedIncomingCallIndication = True
    # Call control setup
    self.write('AT+CVHU=0', parseError=False)
def is_possible_hour(self, hour):
    """Check if a float hour is a possible hour for this analysis period."""
    # An hour past 23 can only be valid when midnight itself is included
    # (wrap-around); truncate the float so the range checks below work.
    if hour > 23 and self.is_possible_hour(0):
        hour = int(hour)
    start_hour = self.st_time.hour
    end_hour = self.end_time.hour
    if self._is_overnight:
        # Overnight periods span midnight: valid if in either half.
        return start_hour <= hour <= 23 or 0 <= hour <= end_hour
    return start_hour <= hour <= end_hour
def handle_query_error(msg, query, session, payload=None):
    """Local method handling error while processing the SQL"""
    # Fall back to a fresh dict when no (or an empty) payload is supplied.
    payload = payload or {}
    troubleshooting_link = config['TROUBLESHOOTING_LINK']
    # Record the failure on the query object and persist it.
    query.error_message = msg
    query.status = QueryStatus.FAILED
    query.tmp_table_name = None
    session.commit()
    payload['status'] = query.status
    payload['error'] = msg
    if troubleshooting_link:
        payload['link'] = troubleshooting_link
    return payload
def profiles(self):
    """A list of all profiles on this web property. You may
    select a specific profile using its name, its id
    or an index.

    ```python
    property.profiles[0]
    property.profiles['9234823']
    property.profiles['marketing profile']
    ```
    """
    response = self.account.service.management().profiles().list(
        accountId=self.account.id, webPropertyId=self.id).execute()
    wrapped = [Profile(raw, self) for raw in response['items']]
    # Case-insensitive addressable list, indexable by id or name.
    return addressable.List(wrapped, indices=['id', 'name'], insensitive=True)
def items(self):
    """Iterator over the words in the dictionary

    Yields:
        str: The next word in the dictionary
        int: The number of instances in the dictionary

    Note:
        This is the same as `dict.items()`
    """
    for word, count in self._dictionary.items():
        yield word, count
def get_channel_by_channel_id(self, channel_id):
    """Get a channel by channel id"""
    self._validate_uuid(channel_id)
    url = "/notification/v1/channel/{}".format(channel_id)
    response = NWS_DAO().getURL(url, self._read_headers)
    # Anything other than a 200 is surfaced to the caller as a failure.
    if response.status != 200:
        raise DataFailureException(url, response.status, response.data)
    payload = json.loads(response.data)
    return self._channel_from_json(payload.get("Channel"))
def calc_factors_grid(self, spatial_reference, zone_array=None, minpts_interp=1,
                      maxpts_interp=20, search_radius=1.0e+10, verbose=False,
                      var_filename=None, forgive=False):
    """calculate kriging factors (weights) for a structured grid.

    Parameters
    ----------
    spatial_reference : flopy.utils.reference.SpatialReference
        a spatial reference that describes the orientation and
        spatial projection of the structured grid
    zone_array : numpy.ndarray
        an integer array of zones to use for kriging.  If not None,
        then point_data must also contain a "zone" column.  point_data
        entries with a zone value not found in zone_array will be skipped.
        If None, then all point_data will (potentially) be used for
        interpolating each grid node.  Default is None
    minpts_interp : int
        minimum number of point_data entries to use for interpolation at
        a given grid node.  grid nodes with less than minpts_interp
        point_data found will be skipped (assigned np.NaN).  Default is 1
    maxpts_interp : int
        maximum number of point_data entries to use for interpolation at
        a given grid node.  A larger maxpts_interp will yield "smoother"
        interpolation, but using a large maxpts_interp will slow the
        (already) slow kriging solution process and may lead to
        memory errors.  Default is 20.
    search_radius : float
        the size of the region around a given grid node to search for
        point_data entries.  Default is 1.0e+10
    verbose : bool
        a flag to echo progress to stdout during the interpolation process.
        Default is False
    var_filename : str
        a filename to save the kriging variance for each interpolated grid
        node.  Default is None.
    forgive : bool
        flag to continue if inversion of the kriging matrix fails at one or
        more grid nodes.  Inversion usually fails if the kriging matrix is
        singular, resulting from point_data entries closer than EPSILON
        distance.  If True, warnings are issued for each failed inversion.
        If False, an exception is raised for failed matrix inversion.

    Returns
    -------
    df : pandas.DataFrame
        a dataframe with information summarizing the ordinary kriging
        process for each grid node

    Note
    ----
    this method calls OrdinaryKrige.calc_factors()

    Example
    -------
    ``>>> import flopy``
    ``>>> import pyemu``
    ``>>> v = pyemu.utils.geostats.ExpVario(a=1000, contribution=1.0)``
    ``>>> gs = pyemu.utils.geostats.GeoStruct(variograms=v, nugget=0.5)``
    ``>>> pp_df = pyemu.pp_utils.pp_file_to_dataframe("hkpp.dat")``
    ``>>> ok = pyemu.utils.geostats.OrdinaryKrige(gs, pp_df)``
    ``>>> m = flopy.modflow.Modflow.load("mymodel.nam")``
    ``>>> df = ok.calc_factors_grid(m.sr, zone_array=m.bas6.ibound[0].array,``
    ``>>>                           var_filename="ok_var.dat")``
    ``>>> ok.to_grid_factor_file("factors.dat")``
    """
    self.spatial_reference = spatial_reference
    # Reset any factors from a previous run; repopulated by calc_factors().
    self.interp_data = None
    # assert isinstance(spatial_reference, SpatialReference)
    try:
        x = self.spatial_reference.xcentergrid.copy()
        y = self.spatial_reference.ycentergrid.copy()
    except Exception as e:
        raise Exception("spatial_reference does not have proper attributes:{0}".format(str(e)))
    if var_filename is not None:
        # Variance placeholder; -1.0e+30 marks nodes never interpolated.
        arr = np.zeros((self.spatial_reference.nrow,
                        self.spatial_reference.ncol)) - 1.0e+30
    # the simple case of no zone array: ignore point_data zones
    if zone_array is None:
        df = self.calc_factors(x.ravel(), y.ravel(),
                               minpts_interp=minpts_interp,
                               maxpts_interp=maxpts_interp,
                               search_radius=search_radius,
                               verbose=verbose, forgive=forgive)
        if var_filename is not None:
            arr = df.err_var.values.reshape(x.shape)
            np.savetxt(var_filename, arr, fmt="%15.6E")
    if zone_array is not None:
        assert zone_array.shape == x.shape
        if "zone" not in self.point_data.columns:
            warnings.warn("'zone' columns not in point_data, assigning generic zone", PyemuWarning)
            self.point_data.loc[:, "zone"] = 1
        pt_data_zones = self.point_data.zone.unique()
        dfs = []
        for pt_data_zone in pt_data_zones:
            # Skip point-data zones that do not appear in the grid zone array.
            if pt_data_zone not in zone_array:
                warnings.warn("pt zone {0} not in zone array {1}, skipping"
                              .format(pt_data_zone, np.unique(zone_array)), PyemuWarning)
                continue
            # Mask nodes outside the current zone so calc_factors skips them.
            xzone, yzone = x.copy(), y.copy()
            xzone[zone_array != pt_data_zone] = np.NaN
            yzone[zone_array != pt_data_zone] = np.NaN
            df = self.calc_factors(xzone.ravel(), yzone.ravel(),
                                   minpts_interp=minpts_interp,
                                   maxpts_interp=maxpts_interp,
                                   search_radius=search_radius,
                                   verbose=verbose, pt_zone=pt_data_zone,
                                   forgive=forgive)
            dfs.append(df)
            if var_filename is not None:
                # Merge this zone's variances into the full-grid array.
                a = df.err_var.values.reshape(x.shape)
                na_idx = np.isfinite(a)
                arr[na_idx] = a[na_idx]
        if self.interp_data is None or self.interp_data.dropna().shape[0] == 0:
            raise Exception("no interpolation took place...something is wrong")
        df = pd.concat(dfs)
        if var_filename is not None:
            np.savetxt(var_filename, arr, fmt="%15.6E")
    return df
def glob7s(p, Input, flags):
    '''/* VERSION OF GLOBE FOR LOWER ATMOSPHERE 10/26/99'''
    # NOTE(review): direct port of the NRLMSISE-00 C routine.  Relies on
    # module-level globals (plg, dfa, apdf, apt, ctloc/stloc, c2tloc/s2tloc,
    # c3tloc/s3tloc) being populated by earlier calls -- confirm call order.
    pset = 2.0
    t = [0.0] * 14
    # dr: days -> radians (2*pi/365.25); dgtr: degrees -> radians.
    dr = 1.72142E-2;
    dgtr = 1.74533E-2;
    # /* confirm parameter set */
    if (p[99] == 0):  # pragma: no cover
        p[99] = pset;
    # for j in range(14):  # Already taken care of
    #     t[j] = 0.0;
    # Seasonal cosine terms relative to the phase parameters in p.
    cd32 = cos(dr * (Input.doy - p[31]));
    cd18 = cos(2.0 * dr * (Input.doy - p[17]));
    cd14 = cos(dr * (Input.doy - p[13]));
    cd39 = cos(2.0 * dr * (Input.doy - p[38]));
    p32 = p[31];
    p18 = p[17];
    p14 = p[13];
    p39 = p[38];
    # /* F10.7 */
    t[0] = p[21] * dfa;
    # /* time independent */
    t[1] = p[1] * plg[0][2] + p[2] * plg[0][4] + p[22] * plg[0][6] + p[26] * plg[0][1] + p[14] * plg[0][3] + p[59] * plg[0][5];
    # /* SYMMETRICAL ANNUAL */
    t[2] = (p[18] + p[47] * plg[0][2] + p[29] * plg[0][4]) * cd32;
    # /* SYMMETRICAL SEMIANNUAL */
    t[3] = (p[15] + p[16] * plg[0][2] + p[30] * plg[0][4]) * cd18;
    # /* ASYMMETRICAL ANNUAL */
    t[4] = (p[9] * plg[0][1] + p[10] * plg[0][3] + p[20] * plg[0][5]) * cd14;
    # /* ASYMMETRICAL SEMIANNUAL */
    t[5] = (p[37] * plg[0][1]) * cd39;
    # /* DIURNAL */
    if (flags.sw[7]):
        t71 = p[11] * plg[1][2] * cd14 * flags.swc[5];
        t72 = p[12] * plg[1][2] * cd14 * flags.swc[5];
        t[6] = ((p[3] * plg[1][1] + p[4] * plg[1][3] + t71) * ctloc + (p[6] * plg[1][1] + p[7] * plg[1][3] + t72) * stloc);
    # /* SEMIDIURNAL */
    if (flags.sw[8]):
        t81 = (p[23] * plg[2][3] + p[35] * plg[2][5]) * cd14 * flags.swc[5];
        t82 = (p[33] * plg[2][3] + p[36] * plg[2][5]) * cd14 * flags.swc[5];
        t[7] = ((p[5] * plg[2][2] + p[41] * plg[2][4] + t81) * c2tloc + (p[8] * plg[2][2] + p[42] * plg[2][4] + t82) * s2tloc);
    # /* TERDIURNAL */
    if (flags.sw[14]):
        t[13] = p[39] * plg[3][3] * s3tloc + p[40] * plg[3][3] * c3tloc;
    # /* MAGNETIC ACTIVITY */
    if (flags.sw[9]):
        # sw[9] == 1: daily Ap formulation; sw[9] == -1: 3-hour ap history.
        if (flags.sw[9] == 1):
            t[8] = apdf * (p[32] + p[45] * plg[0][2] * flags.swc[2]);
        if (flags.sw[9] == -1):
            t[8] = (p[50] * apt[0] + p[96] * plg[0][2] * apt[0] * flags.swc[2]);
    # /* LONGITUDINAL */
    # Skipped when either longitude switch is off or g_long is the
    # "no longitude" sentinel (<= -1000 degrees).
    if (not ((flags.sw[10] == 0) or (flags.sw[11] == 0) or (Input.g_long <= -1000.0))):
        t[10] = (1.0 + plg[0][1] * (p[80] * flags.swc[5] * cos(dr * (Input.doy - p[81])) + p[85] * flags.swc[6] * cos(2.0 * dr * (Input.doy - p[86]))) + p[83] * flags.swc[3] * cos(dr * (Input.doy - p[84])) + p[87] * flags.swc[4] * cos(2.0 * dr * (Input.doy - p[88]))) * ((p[64] * plg[1][2] + p[65] * plg[1][4] + p[66] * plg[1][6] + p[74] * plg[1][1] + p[75] * plg[1][3] + p[76] * plg[1][5]) * cos(dgtr * Input.g_long) + (p[90] * plg[1][2] + p[91] * plg[1][4] + p[92] * plg[1][6] + p[77] * plg[1][1] + p[78] * plg[1][3] + p[79] * plg[1][5]) * sin(dgtr * Input.g_long));
    # Sum the enabled terms; |sw| keeps terms whose switch is -1 (history mode).
    tt = 0;
    for i in range(14):
        tt += abs(flags.sw[i + 1]) * t[i];
    return tt;
def _start_recording ( self , * args , ** kwargs ) :
"""Starts recording
Parameters
* args : any
Ordinary args used for calling the specified data sampler method
* * kwargs : any
Keyword args used for calling the specified data sampler method""" | while not self . _cmds_q . empty ( ) :
self . _cmds_q . get_nowait ( )
while not self . _data_qs [ self . _cur_data_segment ] . empty ( ) :
self . _data_qs [ self . _cur_data_segment ] . get_nowait ( )
self . _args = args
self . _kwargs = kwargs
self . _recording = True
self . start ( ) |
def cleanup(self, sched, coro):
    """Remove this coro from the waiting for signal queue."""
    waiters = sched.sigwait[self.name]
    try:
        waiters.remove((self, coro))
    except ValueError:
        # Entry already gone -- nothing left to clean up.
        pass
    return True
def delete_object(self, id):
    """Deletes the object with the given ID from the graph."""
    # x = self.request(id, post_args={"method": "delete"})
    # NOTE(review): 'access_token' is set to the object id here -- looks
    # suspicious; confirm against the Graph API caller.
    query = urllib.parse.urlencode({"method": "delete", 'access_token': str(id)})
    response = requests.get("https://graph.facebook.com/" + str(id) + "?" + query)
    return response.json()
def send_attachment_url(self, recipient_id, attachment_type, attachment_url,
                        notification_type=NotificationType.regular):
    """Send an attachment to the specified recipient using URL.

    Input:
        recipient_id: recipient id to send to
        attachment_type: type of attachment (image, video, audio, file)
        attachment_url: URL of attachment
    Output:
        Response from API as <dict>
    """
    message = {
        'attachment': {
            'type': attachment_type,
            'payload': {'url': attachment_url},
        },
    }
    return self.send_message(recipient_id, message, notification_type)
def _finalise_figure ( fig , ** kwargs ) : # pragma : no cover
"""Internal function to wrap up a figure .
Possible arguments :
: type title : str
: type show : bool
: type save : bool
: type savefile : str
: type return _ figure : bool""" | title = kwargs . get ( "title" ) or None
show = kwargs . get ( "show" ) or False
save = kwargs . get ( "save" ) or False
savefile = kwargs . get ( "savefile" ) or "EQcorrscan_figure.png"
return_fig = kwargs . get ( "return_figure" ) or False
if title :
fig . suptitle ( title )
if show :
fig . show ( )
if save :
fig . savefig ( savefile )
print ( "Saved figure to {0}" . format ( savefile ) )
if return_fig :
return fig
return None |
def set_mark(self):
    """Mark the current location and return its id so that the buffer can return later."""
    marks = self._bookmarks
    marks.append(self._offset)
    # The mark id is simply its position in the bookmark list.
    return len(marks) - 1
def tag(request, tag_id=None):
    """The view used to render a tag after the page has loaded."""
    tmpl = template.Template(get_tag_html(tag_id))
    ctx = template.RequestContext(request)
    return HttpResponse(tmpl.render(ctx))
def worker(workers):
    """Starts a Superset worker for async SQL query execution."""
    logging.info("The 'superset worker' command is deprecated. Please use the 'celery " "worker' command instead.")
    # CLI argument wins; otherwise fall back to the configured worker count.
    concurrency = workers or config.get('SUPERSET_CELERY_WORKERS')
    if concurrency:
        celery_app.conf.update(CELERYD_CONCURRENCY=concurrency)
    celery_worker = celery_app.Worker(optimization='fair')
    celery_worker.start()
def html_format(data, out, opts=None, **kwargs):
    '''Return the formatted string as HTML.'''
    ansi_escaped_string = string_format(data, out, opts, **kwargs)
    # Preserve spacing and line breaks in HTML output: plain spaces collapse
    # in HTML, so use non-breaking spaces, and newlines become <br /> tags.
    # (The original replaced ' ' with ' ' -- a no-op; the '&nbsp;' entity was
    # evidently lost in transit.)
    return ansi_escaped_string.replace(' ', '&nbsp;').replace('\n', '<br />')
def preprocessing(aws_config, ip_ranges=None, ip_ranges_name_key=None):
    """Tweak the AWS config to match cross-service resources and clean any fetching artifacts

    :param aws_config:  dict of fetched AWS configuration, mutated in place
    :param ip_ranges:   optional list of known IP ranges (default: empty list)
    :param ip_ranges_name_key: key used to name entries from ip_ranges
    :return:
    """
    # Avoid the mutable-default-argument pitfall: a shared [] default would be
    # reused across calls.  None sentinel keeps the interface identical.
    if ip_ranges is None:
        ip_ranges = []
    map_all_sgs(aws_config)
    map_all_subnets(aws_config)
    set_emr_vpc_ids(aws_config)
    # parse_elb_policies(aws_config)
    # Various data processing calls
    add_security_group_name_to_ec2_grants(aws_config['services']['ec2'], aws_config['aws_account_id'])
    process_cloudtrail_trails(aws_config['services']['cloudtrail'])
    add_cidr_display_name(aws_config, ip_ranges, ip_ranges_name_key)
    merge_route53_and_route53domains(aws_config)
    match_instances_and_roles(aws_config)
    match_iam_policies_and_buckets(aws_config)
    # Preprocessing dictated by metadata
    process_metadata_callbacks(aws_config)
def save(self, path, key, format, data):
    """Save a newly generated thumbnail.

    path:
        path of the source image
    key:
        key of the thumbnail
    format:
        thumbnail's file extension
    data:
        thumbnail's binary data
    """
    thumbpath = self.get_thumbpath(path, key, format)
    fullpath = os.path.join(self.out_path, thumbpath)
    # Write the binary data to disk before building the Thumb record.
    self.save_thumb(fullpath, data)
    return Thumb(self.get_url(thumbpath), key, fullpath)
def selectAll(self):
    """Selects all the items in the scene."""
    active_layer = self._currentLayer
    for item in self.items():
        item_layer = item.layer()
        # Items with no layer at all count as part of the current layer.
        if item_layer == active_layer or not item_layer:
            item.setSelected(True)
def visitStartActions(self, ctx: ShExDocParser.StartActionsContext):
    """startActions: codeDecl+"""
    schema = self.context.schema
    schema.startActs = []
    # Each code declaration gets its own sub-parser; collected semantic
    # actions are appended to the schema's start actions.
    for code_decl in ctx.codeDecl():
        subparser = ShexAnnotationAndSemactsParser(self.context)
        subparser.visit(code_decl)
        schema.startActs += subparser.semacts
def register_list_auth_roles_command(self, list_auth_roles_func):
    """Add 'list_auth_roles' command to list project authorization roles that can be used with add_user.

    :param list_auth_roles_func: function: run when user choses this option.
    """
    parser = self.subparsers.add_parser(
        'list-auth-roles',
        description="List authorization roles for use with add_user command.")
    parser.set_defaults(func=list_auth_roles_func)
def parse_encoding(value=None):
    """Parse a value to a valid encoding.

    This function accepts either a member of
    :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or a string describing a member.
    If no value is passed, it will assume ``PEM`` as a default value.  Note that ``"ASN1"`` is treated
    as an alias for ``"DER"``.

    >>> parse_encoding()
    <Encoding.PEM: 'PEM'>
    >>> parse_encoding('DER')
    <Encoding.DER: 'DER'>
    >>> parse_encoding(Encoding.PEM)
    <Encoding.PEM: 'PEM'>
    """
    if value is None:
        return ca_settings.CA_DEFAULT_ENCODING
    if isinstance(value, Encoding):
        return value
    if not isinstance(value, six.string_types):
        raise ValueError('Unknown type passed: %s' % type(value).__name__)
    # "ASN1" is an accepted alias for DER.
    if value == 'ASN1':
        value = 'DER'
    try:
        return getattr(Encoding, value)
    except AttributeError:
        raise ValueError('Unknown encoding: %s' % value)
def stats(self, symbol):
    """curl https://api.bitfinex.com/v1/stats/btcusd

    {"period": 1, "volume": "7410.27250155"},
    {"period": 7, "volume": "52251.37118006"},
    {"period": 30, "volume": "464505.07753251"}
    """
    data = self._get(self.url_for(PATH_STATS, (symbol)))
    # Normalise the API's string values to numbers in place.  Only assign for
    # keys we recognise: the original code wrote `period[key] = new_value` for
    # *every* key, which raised NameError (or reused a stale value) whenever
    # the API returned a key other than 'period'/'volume'.
    for period in data:
        for key, value in period.items():
            if key == 'period':
                period[key] = int(value)
            elif key == 'volume':
                period[key] = float(value)
    return data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.