signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def get_default_plugin(cls):
    """Return (lazily creating and caching) the default access plugin.

    The plugin class is resolved from the ``ACCESS_DEFAULT_PLUGIN`` Django
    setting, a dotted import path. Instances are cached per dotted path in
    ``cls.default_plugins`` so each plugin is only instantiated once.
    """
    from importlib import import_module
    from django.conf import settings

    dotted_path = getattr(settings, 'ACCESS_DEFAULT_PLUGIN',
                          "access.plugins.DjangoAccessPlugin")
    if dotted_path not in cls.default_plugins:
        logger.info("Creating a default plugin: %s", dotted_path)
        # Split "pkg.module.ClassName" into module path and class name.
        module_path, _, class_name = dotted_path.rpartition('.')
        plugin_cls = getattr(import_module(module_path), class_name)
        cls.default_plugins[dotted_path] = plugin_cls()
    return cls.default_plugins[dotted_path]
def add_coupon(self, coupon, idempotency_key=None):
    """Add a coupon to a Customer.

    The coupon can be a Coupon object, or a valid Stripe Coupon ID.
    """
    if isinstance(coupon, StripeModel):
        # Accept a model instance; Stripe's API wants the plain ID.
        coupon = coupon.id
    customer_data = self.api_retrieve()
    customer_data["coupon"] = coupon
    customer_data.save(idempotency_key=idempotency_key)
    return self.__class__.sync_from_stripe_data(customer_data)
def is_non_zero_before_non_zero(self, other):
    """Return ``True`` if this time interval ends when the given other
    time interval begins, and both have non-zero length.

    :param other: the other interval
    :type other: :class:`~aeneas.exacttiming.TimeInterval`
    :raises TypeError: if ``other`` is not an instance of ``TimeInterval``
    :rtype: bool
    """
    if not self.is_adjacent_before(other):
        return False
    return (not self.has_zero_length) and (not other.has_zero_length)
def get_news_aggregation(self):
    """Call the News Aggregation API endpoint.

    Return:
        json data
    """
    endpoint = self.api_path + "news_aggregation" + "/"
    return self.get_response(endpoint)
def _record_field_to_json(fields, row_value):
    """Convert a record/struct field to its JSON representation.

    Args:
        fields (Sequence[google.cloud.bigquery.schema.SchemaField]):
            Schema of the record's subfields, used for type conversion
            and field names.
        row_value (Union[Tuple[Any], Mapping[str, Any]]):
            A tuple or dictionary to convert to JSON-serializable values.

    Returns:
        Mapping[str, Any]: A JSON-serializable dictionary.
    """
    record = {}
    is_mapping = isinstance(row_value, dict)
    for index, subfield in enumerate(fields):
        if is_mapping:
            # Missing keys serialize as None (dict.get default).
            value = row_value.get(subfield.name)
        else:
            # Positional tuples are matched to subfields by index.
            value = row_value[index]
        record[subfield.name] = _field_to_json(subfield, value)
    return record
def _update_dispatches(self):
    """Flush statuses gathered by the ``mark_*`` methods into the DB,
    then reset the in-memory status accumulator."""
    problematic = self._st['error'] + self._st['failed']
    Dispatch.log_dispatches_errors(problematic)
    Dispatch.set_dispatches_statuses(**self._st)
    self._init_delivery_statuses_dict()
def get_using_network_time():
    '''Display whether network time is on or off

    :return: True if network time is on, False if off
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_using_network_time
    '''
    raw = salt.utils.mac_utils.execute_return_result(
        'systemsetup -getusingnetworktime')
    state = salt.utils.mac_utils.validate_enabled(
        salt.utils.mac_utils.parse_return(raw))
    return state == 'on'
def info(ctx, check_fips):
    """Show general information.

    Displays information about the attached YubiKey such as serial number,
    firmware version, applications, etc.
    """
    dev = ctx.obj['dev']
    if dev.is_fips and check_fips:
        # Query FIPS status up-front so any failure happens before output.
        fips_status = get_overall_fips_status(dev.serial, dev.config)

    click.echo('Device type: {}'.format(dev.device_name))
    click.echo('Serial number: {}'.format(
        dev.serial or 'Not set or unreadable'))
    if dev.version:
        click.echo('Firmware version: {}'.format(
            '.'.join(str(part) for part in dev.version)))
    else:
        click.echo('Firmware version: Uncertain, re-run with only one '
                   'YubiKey connected')

    config = dev.config
    if config.form_factor:
        click.echo('Form factor: {!s}'.format(config.form_factor))
    click.echo('Enabled USB interfaces: {}'.format(dev.mode))
    if config.nfc_supported:
        click.echo('NFC interface is {}.'.format(
            'enabled' if config.nfc_enabled else 'disabled'))
    if config.configuration_locked:
        click.echo('Configured applications are protected by a lock code.')
    click.echo()

    print_app_status_table(config)

    if dev.is_fips and check_fips:
        click.echo()
        click.echo('FIPS Approved Mode: {}'.format(
            'Yes' if all(fips_status.values()) else 'No'))
        for status_key in sorted(fips_status):
            click.echo(' {}: {}'.format(
                status_key, 'Yes' if fips_status[status_key] else 'No'))
def _mdb_get_database(uri, **kwargs):
    """Helper to connect to MongoDB and return a database object.

    The ``uri`` argument should be either a full MongoDB connection URI
    string, or just a database name, in which case a connection to the
    default mongo instance at mongodb://localhost:27017 is made.

    Performs explicit authentication if a username is provided in a
    connection string URI, since PyMongo does not always seem to do that
    as promised.

    :param uri: connection URI string or bare database name
    :returns: pymongo database object
    """
    # Default to timezone-aware datetimes, but let the caller override.
    kwargs.setdefault("tz_aware", True)

    connection_factory = MongoClient
    parsed = {}
    try:
        parsed = pymongo.uri_parser.parse_uri(uri)
    except pymongo.errors.InvalidURI:
        # Assume the URI is just a database name; connect with defaults.
        db_name = uri
        conn = MongoClient()
    else:
        if "replicaset" in parsed["options"]:
            connection_factory = MongoReplicaSetClient
        db_name = parsed.get("database", "pysaml2")
        conn = connection_factory(uri, **kwargs)

    database = conn[db_name]
    if "username" in parsed:
        # Explicit auth; PyMongo does not always do this from the URI alone.
        database.authenticate(parsed.get("username", None),
                              parsed.get("password", None))
    return database
def migrate(move_data=True, update_alias=True):
    """Upgrade function that creates a new index for the data.

    Optionally (and by default) it reindexes the previous copy of the data
    into the new index (specify ``move_data=False`` to skip this step) and
    updates the alias to point to the latest index (set
    ``update_alias=False`` to skip).

    Note that while this function is running the application can still
    perform any and all searches without any loss of functionality. It
    should, however, not perform any writes at this time as those might
    be lost.
    """
    # Construct a new index name by appending the current timestamp.
    next_index = PATTERN.replace(
        '*', datetime.now().strftime('%Y%m%d%H%M%S%f'))

    # Get the low-level connection.
    es = connections.get_connection()

    # Create the new index; it picks up settings from the template.
    es.indices.create(index=next_index)

    if move_data:
        # Move data from the current alias into the new index.
        es.reindex(
            body={"source": {"index": ALIAS},
                  "dest": {"index": next_index}},
            request_timeout=3600,
        )
        # Refresh the index to make the changes visible.
        es.indices.refresh(index=next_index)

    if update_alias:
        # Repoint the alias at the newly created index.
        es.indices.update_aliases(body={
            'actions': [
                {"remove": {"alias": ALIAS, "index": PATTERN}},
                {"add": {"alias": ALIAS, "index": next_index}},
            ]
        })
def oauth1_token_setter(remote, resp, token_type='', extra_data=None):
    """Set an OAuth1 token.

    :param remote: The remote application.
    :param resp: The response.
    :param token_type: The token type. (Default: ``''``)
    :param extra_data: Extra information. (Default: ``None``)
    :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
    """
    # OAuth1 responses carry both the token and its secret.
    return token_setter(
        remote,
        resp['oauth_token'],
        secret=resp['oauth_token_secret'],
        extra_data=extra_data,
        token_type=token_type,
    )
def get_smart_invite(self, smart_invite_id, recipient_email):
    """Get the details for a smart invite.

    :param string smart_invite_id: A String uniquely identifying the event
        for your application (note: this is NOT an ID generated by Cronofy).
    :param string recipient_email: The email address for the recipient to
        get details for.
    :return: decoded JSON response
    """
    query = {
        'smart_invite_id': smart_invite_id,
        'recipient_email': recipient_email,
    }
    response = self.request_handler.get(
        'smart_invites', params=query, use_api_key=True)
    return response.json()
def tag_named_entities(self):
    """Tag the ``named_entities`` layer.

    This automatically performs morphological analysis along with all
    dependencies.
    """
    if not self.is_tagged(LABEL):
        self.tag_labels()

    entities = []
    span_start = -1
    current_label = 'O'
    words = self.words
    # Append a sentinel 'O' so the final open entity is flushed.
    for idx, tag in enumerate(self.labels + ['O']):
        if tag.startswith('B-') or tag == 'O':
            if span_start != -1:
                # Close the entity that ended on the previous word.
                entities.append({
                    START: words[span_start][START],
                    END: words[idx - 1][END],
                    LABEL: current_label,
                })
            if tag.startswith('B-'):
                span_start = idx
                current_label = tag[2:]
            else:
                span_start = -1
        # 'I-*' tags fall through and extend the current entity span.
    self[NAMED_ENTITIES] = entities
    return self
def _create_percolator_mapping(index, doc_type):
    """Update mappings with the percolator field.

    .. note::

        This is only needed from ElasticSearch v5 onwards, because
        percolators are now just a special type of field inside mappings.
    """
    if ES_VERSION[0] < 5:
        return
    current_search_client.indices.put_mapping(
        index=index,
        doc_type=doc_type,
        body=PERCOLATOR_MAPPING,
        ignore=[400, 404],
    )
def validate_month_for_31_days(month_number):
    """Check whether a month number refers to a month that has 31 days.

    Args:
        month_number (int): Number representing the month (1-12).

    Returns:
        bool: True if the month has 31 days, False otherwise.

    Examples:
        >>> validate_month_for_31_days(5)
        True
        >>> validate_month_for_31_days(2)
        False
        >>> validate_month_for_31_days(6)
        False
    """
    return month_number in {1, 3, 5, 7, 8, 10, 12}
def chi_squareds(self, p=None):
    """Return a list of chi-squared values, one per data set.

    Also uses ydata_massaged (via :meth:`studentized_residuals`).

    :param p: fit parameters to evaluate; ``None`` means use the stored
        fit results (``self.results[0]``).
    :return: list of chi-squared values, or ``None`` if there is no data
        or no residuals could be computed.
    """
    if len(self._set_xdata) == 0 or len(self._set_ydata) == 0:
        return None
    if p is None:
        p = self.results[0]

    # Get the residuals; this may legitimately return None.
    rs = self.studentized_residuals(p)
    # Bug fix: use an identity check. ``rs == None`` can misbehave (or
    # raise on ambiguous truth value) when rs holds array-like data.
    if rs is None:
        return None

    # Square them and sum them, one chi-squared per data set.
    return [sum(r ** 2) for r in rs]
def find_xor_mask(data, alphabet=None, max_depth=3, min_depth=0, iv=None):
    """Produce a series of bytestrings that, when XORed together, end up
    being equal to ``data`` and only contain characters from the given
    ``alphabet``. The initial state (or previous state) can be given as
    ``iv``.

    Arguments:
        data(bytes): The data to recreate as a series of XOR operations.
        alphabet(bytes): The bytestring containing the allowed characters
            for the XOR values. If ``None``, all characters except NUL
            bytes, carriage returns and newlines will be allowed.
        max_depth(int): The maximum depth to look for a solution.
        min_depth(int): The minimum depth to look for a solution.
        iv(bytes): Initialization vector. If ``None``, it will be assumed
            the operation starts at an all zero string.

    Returns:
        A list of bytestrings that, when XOR'ed with ``iv`` (or just each
        other if ``iv`` is not provided) will be the same as ``data``.
    """
    if alphabet is None:
        alphabet = set(b for b in range(256) if b not in (0, 10, 13))
    else:
        alphabet = set(six.iterbytes(alphabet))

    if iv is None:
        iv = b'\0' * len(data)
    if len(data) != len(iv):
        raise ValueError('length of iv differs from data')

    if not min_depth and data == iv:
        return []

    # Work relative to the initialization vector from here on.
    data = xor(data, iv)

    # Pre-flight check to see if the alphabet covers all the bits we need.
    covered = 0
    for ch in alphabet:
        covered |= ch
    missing_bits = ~covered

    # Map all bytes in data into a {byte: [pos...]} dictionary, checking
    # bit coverage along the way.
    data_map_tmpl = {}
    for pos, ch in enumerate(six.iterbytes(data)):
        if ch & missing_bits:
            raise ValueError('Alphabet does not contain enough bits.')
        data_map_tmpl.setdefault(ch, []).append(pos)

    # Try increasingly deep combinations until a solution is found.
    for depth in range(max(min_depth, 1), max_depth + 1):
        # Prepare for this round.
        data_map = data_map_tmpl.copy()
        results = [[None] * len(data) for _ in range(depth)]

        for values in itertools.product(*([alphabet] * (depth - 1))):
            # Cumulative mask for this combination of alphabet values.
            combo_mask = 0
            for value in values:
                combo_mask ^= value

            for ch in list(data_map):
                candidate = ch ^ combo_mask
                if candidate in alphabet:
                    # Found a solution for this byte; record it at every
                    # position where it occurs.
                    for p in data_map.pop(ch):
                        results[0][p] = candidate
                        for i, value in enumerate(values):
                            results[i + 1][p] = value

            if not data_map:
                # Every byte is accounted for -- we're done!
                return [b''.join(six.int2byte(b) for b in r)
                        for r in results]

    # No solution found at any depth.
    raise ValueError('No solution found.')
def sync_sdb(saltenv=None, extmod_whitelist=None, extmod_blacklist=None):
    '''
    .. versionadded:: 2015.5.8,2015.8.3

    Sync sdb modules from ``salt://_sdb`` to the minion

    saltenv
        The fileserver environment from which to sync. To sync from more
        than one environment, pass a comma-separated list.

        If not passed, then all environments configured in the :ref:`top
        files <states-top>` will be checked for sdb modules to sync. If no
        top files are found, then the ``base`` environment will be synced.

    refresh : False
        This argument has no effect and is included for consistency with
        the other sync functions.

    extmod_whitelist : None
        comma-separated list of modules to sync

    extmod_blacklist : None
        comma-separated list of modules to blacklist based on type

    CLI Example:

    .. code-block:: bash

        salt '*' saltutil.sync_sdb
        salt '*' saltutil.sync_sdb saltenv=dev
        salt '*' saltutil.sync_sdb saltenv=base,dev
    '''
    return _sync('sdb', saltenv, extmod_whitelist, extmod_blacklist)
def anyword_substring_search_inner(query_word, target_words):
    """Search ``target_words`` for one that starts with ``query_word``.

    Note: despite being used as a boolean test, the match result is the
    (truthy) ``query_word`` itself, not ``True``.

    :param query_word: prefix to look for
    :param target_words: iterable of candidate words
    :return: ``query_word`` if any target word starts with it,
        otherwise ``False``.
    """
    for target_word in target_words:
        if target_word.startswith(query_word):
            return query_word
    return False
def psql_to_obj(cls, file_path=None, text='', columns=None,
                remove_empty_rows=True, key_on=None, deliminator=' | ',
                eval_cells=True):
    """Convert a psql file or text to a seaborn table.

    :param file_path: str of the path to the file
    :param text: str of the psql text
    :param columns: list of str of columns to use
    :param remove_empty_rows: bool if True will remove empty rows
    :param key_on: list of str of columns to key on
    :param deliminator: str to use as a deliminator
    :param eval_cells: bool if True will try to evaluate numbers
    :return: SeabornTable
    """
    lines = cls._get_lines(file_path, text)
    if len(lines) == 1:
        lines = lines[0].split('\r')
    # Drop the "+----+----+" separator bar under the header, if present.
    if not lines[1].replace('+', '').replace('-', '').strip():
        lines.pop(1)
    list_of_list = [
        [cls._eval_cell(cell, _eval=eval_cells)
         for cell in row.split(deliminator)]
        for row in lines
        if not remove_empty_rows or True in [bool(r) for r in row]
    ]
    return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns)
def match_repository_configuration(url, page_size=10, page_index=0, sort=""):
    """Search for Repository Configurations based on internal or external
    url with exact match."""
    content = match_repository_configuration_raw(
        url, page_size, page_index, sort)
    if content:
        return utils.format_json_list(content)
    # Implicitly returns None when nothing matched.
def is_quota_exceeded(self) -> bool:
    '''Return whether the quota is exceeded.

    The quota counts as exceeded only once the downloaded size reaches it
    and the URL table has no root URLs left to do.
    '''
    if self.quota and self._url_table is not None:
        return (self.size >= self.quota
                and self._url_table.get_root_url_todo_count() == 0)
    # Bug fix: the original fell off the end and returned None despite the
    # ``-> bool`` annotation; return an explicit False instead.
    return False
def forcemerge(self, index=None, params=None):
    """Force merge one or more indices through the Elasticsearch API.

    The merge relates to the number of segments a Lucene index holds
    within each shard. The force merge operation allows to reduce the
    number of segments by merging them.

    This call will block until the merge is complete. If the http
    connection is lost, the request will continue in the background, and
    any new requests will block until the previous force merge is
    complete.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_

    :arg index: A comma-separated list of index names; use `_all` or empty
        string to perform the operation on all indices
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes
        `_all` string or when no indices have been specified)
    :arg expand_wildcards: Whether to expand wildcard expression to
        concrete indices that are open, closed or both., default 'open',
        valid choices are: 'open', 'closed', 'none', 'all'
    :arg flush: Specify whether the index should be flushed after
        performing the operation (default: true)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg max_num_segments: The number of segments the index should be
        merged into (default: dynamic)
    :arg only_expunge_deletes: Specify whether the operation should only
        expunge deleted documents
    :arg operation_threading: TODO: ?
    """
    return self.transport.perform_request(
        "POST", _make_path(index, "_forcemerge"), params=params)
def __allocateBits(self):
    """Allocate this margin's bit range based on the required bit count.

    Scans the bit ranges already claimed by other margins and takes the
    first gap wide enough, or grabs the tail bits if no gap fits.
    """
    if self._bit_count < 0:
        raise Exception("A margin cannot request negative number of bits")
    if self._bit_count == 0:
        # Nothing to allocate.
        return

    # Build a sorted list of the bit ranges occupied by other margins.
    occupied = []
    for margin in self._qpart.getMargins():
        candidate = margin.getBitRange()
        if candidate is None:
            continue
        # Insertion sort: place before the first range starting after us.
        inserted = False
        for position in range(len(occupied)):
            if candidate[1] < occupied[position][0]:
                occupied.insert(position, candidate)
                inserted = True
                break
        if not inserted:
            occupied.append(candidate)

    # Find the first gap wide enough for the requested bits.
    gap_start = 0
    for existing in occupied:
        if existing[0] - gap_start >= self._bit_count:
            self._bitRange = (gap_start, gap_start + self._bit_count - 1)
            return
        gap_start = existing[1] + 1

    # Not allocated in a gap, i.e. grab the tail bits.
    self._bitRange = (gap_start, gap_start + self._bit_count - 1)
def clear_masters(self):
    """Clear master packages if they already exist in the dependencies
    or were added to install two or more times."""
    deduplicated = Utils().remove_dbs(self.packages)
    self.packages = [pkg for pkg in deduplicated
                     if pkg not in self.dependencies]
def cancel_rekey(self, recovery_key=False):
    """Cancel any in-progress rekey.

    This clears the rekey settings as well as any progress made. This must
    be called to change the parameters of the rekey.

    Note: Verification is still a part of a rekey. If rekeying is canceled
    during the verification flow, the current unseal keys remain valid.

    Supported methods:
        DELETE: /sys/rekey/init. Produces: 204 (empty body)
        DELETE: /sys/rekey-recovery-key/init. Produces: 204 (empty body)

    :param recovery_key: If true, send requests to "rekey-recovery-key"
        instead of "rekey" api path.
    :type recovery_key: bool
    :return: The response of the request.
    :rtype: requests.Response
    """
    if recovery_key:
        api_path = '/v1/sys/rekey-recovery-key/init'
    else:
        api_path = '/v1/sys/rekey/init'
    return self._adapter.delete(url=api_path)
def lv_unpack(txt):
    """Deserialize a string in length:value format.

    Example: ``"2:ab3:cde"`` -> ``["ab", "cde"]``.

    :param txt: The input string
    :return: a list of values
    """
    txt = txt.strip()
    values = []
    while txt:
        # Each item is "<length>:<payload...>"; the payload's tail is the
        # next item's encoding.
        length, rest = txt.split(':', 1)
        length = int(length)
        values.append(rest[:length])
        txt = rest[length:]
    return values
def get_start_time(self):
    """Determine when this process started running.

    @rtype:  win32.SYSTEMTIME
    @return: Process start time.
    """
    # Vista and later expose a limited query right that is sufficient.
    if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
        dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
    else:
        dwAccess = win32.PROCESS_QUERY_INFORMATION
    hProcess = self.get_handle(dwAccess)
    # GetProcessTimes returns (creation, exit, kernel, user) times.
    creation_time = win32.GetProcessTimes(hProcess)[0]
    return win32.FileTimeToSystemTime(creation_time)
def _handle_eio_message(self, sid, data):
    """Dispatch Engine.IO messages."""
    if sid in self._binary_packet:
        # A previous packet is still waiting for binary attachments.
        pkt = self._binary_packet[sid]
        if pkt.add_attachment(data):
            # All attachments received; deliver the completed packet.
            del self._binary_packet[sid]
            if pkt.packet_type == packet.BINARY_EVENT:
                self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
            else:
                self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
        return

    pkt = packet.Packet(encoded_packet=data)
    if pkt.packet_type == packet.CONNECT:
        self._handle_connect(sid, pkt.namespace)
    elif pkt.packet_type == packet.DISCONNECT:
        self._handle_disconnect(sid, pkt.namespace)
    elif pkt.packet_type == packet.EVENT:
        self._handle_event(sid, pkt.namespace, pkt.id, pkt.data)
    elif pkt.packet_type == packet.ACK:
        self._handle_ack(sid, pkt.namespace, pkt.id, pkt.data)
    elif pkt.packet_type in (packet.BINARY_EVENT, packet.BINARY_ACK):
        # Stash until all binary attachments have arrived.
        self._binary_packet[sid] = pkt
    elif pkt.packet_type == packet.ERROR:
        raise ValueError('Unexpected ERROR packet.')
    else:
        raise ValueError('Unknown packet type.')
def for_category(self, category, context=None):
    """Return the actions list for this category in the current application.

    Actions are filtered according to :meth:`.Action.available`. If
    ``context`` is None, the current action context (:attr:`context`)
    is used.
    """
    assert self.installed(), "Actions not enabled on this application"
    if context is None:
        context = self.context
    candidates = self._state["categories"].get(category, [])
    return [action for action in candidates if action.available(context)]
def _get_cputemp_with_lmsensors(self, zone=None):
    """Try to determine CPU temperature using the 'sensors' command.

    Searches for the CPU temperature by looking for a value prefixed by
    either "CPU Temp" or "Core 0" - does not look for or average out
    temperatures of all cores if more than one.
    """
    sensors = None
    command = ["sensors"]
    if zone:
        try:
            sensors = self.py3.command_output(command + [zone])
        except self.py3.CommandError:
            # Fall back to querying all chips below.
            pass
    if not sensors:
        sensors = self.py3.command_output(command)
    # Fix: use a raw string so the regex escapes (\+, \() are explicit
    # and not silently dependent on Python's string-escape fallthrough.
    m = re.search(r"(Core 0|CPU Temp).+\+(.+).+\(.+", sensors)
    if m:
        # Strip the trailing unit characters (e.g. "42.0°C" -> 42.0).
        cpu_temp = float(m.groups()[1].strip()[:-2])
    else:
        cpu_temp = "?"
    return cpu_temp
def mavlink_packet(self, m):
    '''Handle an incoming mavlink packet and update the map display.'''
    from MAVProxy.modules.mavproxy_map import mp_slipmap
    mtype = m.get_type()
    sysid = m.get_srcSystem()

    if mtype == "HEARTBEAT":
        # Map the MAV type onto an icon name for this system ID.
        vname = 'plane'
        if m.type in [mavutil.mavlink.MAV_TYPE_FIXED_WING]:
            vname = 'plane'
        elif m.type in [mavutil.mavlink.MAV_TYPE_GROUND_ROVER]:
            vname = 'rover'
        elif m.type in [mavutil.mavlink.MAV_TYPE_SUBMARINE]:
            vname = 'sub'
        elif m.type in [mavutil.mavlink.MAV_TYPE_SURFACE_BOAT]:
            vname = 'boat'
        elif m.type in [mavutil.mavlink.MAV_TYPE_QUADROTOR,
                        mavutil.mavlink.MAV_TYPE_HEXAROTOR,
                        mavutil.mavlink.MAV_TYPE_OCTOROTOR,
                        mavutil.mavlink.MAV_TYPE_TRICOPTER]:
            vname = 'copter'
        elif m.type in [mavutil.mavlink.MAV_TYPE_COAXIAL]:
            vname = 'singlecopter'
        elif m.type in [mavutil.mavlink.MAV_TYPE_HELICOPTER]:
            vname = 'heli'
        elif m.type in [mavutil.mavlink.MAV_TYPE_ANTENNA_TRACKER]:
            vname = 'antenna'
        self.vehicle_type_by_sysid[sysid] = vname

    if sysid not in self.vehicle_type_by_sysid:
        self.vehicle_type_by_sysid[sysid] = 'plane'
    self.vehicle_type_name = self.vehicle_type_by_sysid[sysid]

    # This is the beginnings of allowing support for multiple vehicles
    # in the air at the same time.
    vehicle = 'Vehicle%u' % m.get_srcSystem()

    if mtype == "SIMSTATE" and self.map_settings.showsimpos:
        self.create_vehicle_icon('Sim' + vehicle, 'green')
        self.map.set_position('Sim' + vehicle,
                              (m.lat * 1.0e-7, m.lng * 1.0e-7),
                              rotation=math.degrees(m.yaw))
    elif mtype == "AHRS2" and self.map_settings.showahrs2pos:
        self.create_vehicle_icon('AHRS2' + vehicle, 'blue')
        self.map.set_position('AHRS2' + vehicle,
                              (m.lat * 1.0e-7, m.lng * 1.0e-7),
                              rotation=math.degrees(m.yaw))
    elif mtype == "AHRS3" and self.map_settings.showahrs3pos:
        self.create_vehicle_icon('AHRS3' + vehicle, 'orange')
        self.map.set_position('AHRS3' + vehicle,
                              (m.lat * 1.0e-7, m.lng * 1.0e-7),
                              rotation=math.degrees(m.yaw))
    elif mtype == "GPS_RAW_INT" and self.map_settings.showgpspos:
        (lat, lon) = (m.lat * 1.0e-7, m.lon * 1.0e-7)
        if lat != 0 or lon != 0:
            # Use GPS course-over-ground at speed, otherwise AHRS yaw.
            if m.vel > 300 or 'ATTITUDE' not in self.master.messages:
                cog = m.cog * 0.01
            else:
                cog = math.degrees(self.master.messages['ATTITUDE'].yaw)
            self.create_vehicle_icon('GPS' + vehicle, 'blue')
            self.map.set_position('GPS' + vehicle, (lat, lon), rotation=cog)
    elif mtype == "GPS2_RAW" and self.map_settings.showgps2pos:
        (lat, lon) = (m.lat * 1.0e-7, m.lon * 1.0e-7)
        if lat != 0 or lon != 0:
            self.create_vehicle_icon('GPS2' + vehicle, 'green')
            self.map.set_position('GPS2' + vehicle, (lat, lon),
                                  rotation=m.cog * 0.01)
    elif mtype == 'GLOBAL_POSITION_INT':
        (lat, lon, heading) = (m.lat * 1.0e-7, m.lon * 1.0e-7, m.hdg * 0.01)
        self.lat_lon[m.get_srcSystem()] = (lat, lon)
        if abs(lat) > 1.0e-3 or abs(lon) > 1.0e-3:
            self.have_global_position = True
            self.create_vehicle_icon('Pos' + vehicle, 'red', follow=True)
            # Label icons with the sysid only when tracking several vehicles.
            if len(self.vehicle_type_by_sysid) > 1:
                label = str(sysid)
            else:
                label = None
            self.map.set_position('Pos' + vehicle, (lat, lon),
                                  rotation=heading, label=label,
                                  colour=(255, 255, 255))
            self.map.set_follow_object('Pos' + vehicle,
                                       self.is_primary_vehicle(m))
    elif mtype == 'LOCAL_POSITION_NED' and not self.have_global_position:
        (lat, lon) = mp_util.gps_offset(0, 0, m.x, m.y)
        self.lat_lon[m.get_srcSystem()] = (lat, lon)
        heading = math.degrees(math.atan2(m.vy, m.vx))
        self.create_vehicle_icon('Pos' + vehicle, 'red', follow=True)
        self.map.set_position('Pos' + vehicle, (lat, lon), rotation=heading)
        self.map.set_follow_object('Pos' + vehicle,
                                   self.is_primary_vehicle(m))
    elif mtype == 'HOME_POSITION':
        (lat, lon) = (m.latitude * 1.0e-7, m.longitude * 1.0e-7)
        icon = self.map.icon('home.png')
        self.map.add_object(
            mp_slipmap.SlipIcon('HOME_POSITION', (lat, lon), icon,
                                layer=3, rotation=0, follow=False))
    elif mtype == "NAV_CONTROLLER_OUTPUT":
        tlayer = 'Trajectory%u' % m.get_srcSystem()
        if (self.master.flightmode in ["AUTO", "GUIDED", "LOITER", "RTL",
                                       "QRTL", "QLOITER", "QLAND", "FOLLOW"]
                and m.get_srcSystem() in self.lat_lon):
            (lat, lon) = self.lat_lon[m.get_srcSystem()]
            trajectory = [(lat, lon),
                          mp_util.gps_newpos(lat, lon, m.target_bearing,
                                             m.wp_dist)]
            self.map.add_object(
                mp_slipmap.SlipPolygon('trajectory', trajectory,
                                       layer=tlayer, linewidth=2,
                                       colour=(255, 0, 180)))
        else:
            self.map.add_object(mp_slipmap.SlipClearLayer(tlayer))
    elif mtype == "POSITION_TARGET_GLOBAL_INT":
        # FIXME: base this off SYS_STATUS.MAV_SYS_STATUS_SENSOR_XY_POSITION_CONTROL?
        if m.get_srcSystem() not in self.lat_lon:
            return
        tlayer = 'PostionTarget%u' % m.get_srcSystem()
        (lat, lon) = self.lat_lon[m.get_srcSystem()]
        if (self.master.flightmode in ["AUTO", "GUIDED", "LOITER", "RTL",
                                       "QRTL", "QLOITER", "QLAND",
                                       "FOLLOW"]):
            lat_float = m.lat_int * 1e-7
            lon_float = m.lon_int * 1e-7
            vec = [(lat_float, lon_float), (lat, lon)]
            self.map.add_object(
                mp_slipmap.SlipPolygon('position_target', vec, layer=tlayer,
                                       linewidth=2, colour=(0, 255, 0)))
        else:
            self.map.add_object(mp_slipmap.SlipClearLayer(tlayer))

    if not self.is_primary_vehicle(m):
        # The rest should only be done for the primary vehicle.
        return

    # If the waypoints have changed, redisplay.
    last_wp_change = self.module('wp').wploader.last_change
    if (self.wp_change_time != last_wp_change
            and abs(time.time() - last_wp_change) > 1):
        self.wp_change_time = last_wp_change
        self.display_waypoints()
        # This may have affected the landing lines from the rally points:
        self.rally_change_time = time.time()

    # If the fence has changed, redisplay.
    if self.fence_change_time != self.module('fence').fenceloader.last_change:
        self.display_fence()

    # If the rallypoints have changed, redisplay.
    if self.rally_change_time != self.module('rally').rallyloader.last_change:
        self.rally_change_time = self.module('rally').rallyloader.last_change
        icon = self.map.icon('rallypoint.png')
        self.map.add_object(mp_slipmap.SlipClearLayer('RallyPoints'))
        for i in range(self.module('rally').rallyloader.rally_count()):
            rp = self.module('rally').rallyloader.rally_point(i)
            popup = MPMenuSubMenu('Popup', items=[
                MPMenuItem('Rally Remove', returnkey='popupRallyRemove'),
                MPMenuItem('Rally Move', returnkey='popupRallyMove')])
            self.map.add_object(
                mp_slipmap.SlipIcon('Rally %u' % (i + 1),
                                    (rp.lat * 1.0e-7, rp.lng * 1.0e-7),
                                    icon, layer='RallyPoints', rotation=0,
                                    follow=False, popup_menu=popup))
            loiter_rad = self.get_mav_param('WP_LOITER_RAD')
            if self.map_settings.rallycircle:
                self.map.add_object(
                    mp_slipmap.SlipCircle(
                        'Rally Circ %u' % (i + 1), 'RallyPoints',
                        (rp.lat * 1.0e-7, rp.lng * 1.0e-7), loiter_rad,
                        (255, 255, 0), 2,
                        arrow=self.map_settings.showdirection))
            # Draw a line between rally point and nearest landing point.
            nearest_land_wp = None
            nearest_distance = 10000000.0
            for j in range(self.module('wp').wploader.count()):
                w = self.module('wp').wploader.wp(j)
                if w.command == 21:  # landing waypoint
                    # Distance between rally point and this waypoint.
                    dis = mp_util.gps_distance(w.x, w.y,
                                               rp.lat * 1.0e-7,
                                               rp.lng * 1.0e-7)
                    if dis < nearest_distance:
                        nearest_land_wp = w
                        nearest_distance = dis
            if nearest_land_wp is not None:
                points = []
                if self.get_mav_param('LAND_BREAK_PATH') == 0:
                    # Tangential approach.
                    theta = math.degrees(
                        math.atan(loiter_rad / nearest_distance))
                    tan_dis = math.sqrt(
                        nearest_distance * nearest_distance -
                        (loiter_rad * loiter_rad))
                    ral_bearing = mp_util.gps_bearing(
                        nearest_land_wp.x, nearest_land_wp.y,
                        rp.lat * 1.0e-7, rp.lng * 1.0e-7)
                    points.append(mp_util.gps_newpos(
                        nearest_land_wp.x, nearest_land_wp.y,
                        ral_bearing + theta, tan_dis))
                else:
                    # Not a tangential approach.
                    points.append((rp.lat * 1.0e-7, rp.lng * 1.0e-7))
                points.append((nearest_land_wp.x, nearest_land_wp.y))
                self.map.add_object(
                    mp_slipmap.SlipPolygon('Rally Land %u' % (i + 1),
                                           points, 'RallyPoints',
                                           (255, 255, 0), 2))

    # Check for any events from the map.
    self.map.check_events()
def _create_temporary_projects(enabled_regions, args):
    """Creates a temporary project needed to build an underlying workflow
    for a global workflow. Returns a dictionary with region names as keys
    and project IDs as values.

    The regions in which projects will be created can be:
      i.   regions specified in dxworkflow.json "regionalOptions"
      ii.  regions specified as an argument to "dx build"
      iii. current context project, if none of the above are set
    If both args and dxworkflow.json specify regions, they must match.
    """
    projects_by_region = {}  # maps region name -> temporary project ID
    # Create one temp project in each enabled region
    for region in enabled_regions:
        try:
            request = {
                "name": "Temporary build project for dx build global workflow",
                "region": region,
            }
            if args.bill_to:
                request["billTo"] = args.bill_to
            project_id = dxpy.api.project_new(request)["id"]
            projects_by_region[region] = project_id
            logger.debug("Created temporary project {} to build in".format(project_id))
        except:
            # Clean up any temp projects that might have been created so far
            if projects_by_region:
                dxpy.executable_builder.delete_temporary_projects(projects_by_region.values())
            err_exit()
    return projects_by_region
def shell_run(cmd, cin=None, cwd=None, timeout=10, critical=True, verbose=True):
    '''
    Runs a shell command within a controlled environment.

    .. note:: |use_photon_m|

    :param cmd:
        The command to run

        * A string one would type into a console like
          :command:`git push -u origin master`.
        * Will be split using :py:func:`shlex.split`.
        * It is possible to use a list here, but then no splitting is done.
    :param cin:
        Add something to stdin of `cmd`
    :param cwd:
        Run `cmd` insde specified current working directory
    :param timeout:
        Catch infinite loops (e.g. ``ping``). Exit after `timeout` seconds
    :param critical:
        If set to ``True``: |appteardown| on failure of `cmd`
    :param verbose:
        Show messages and warnings
    :returns:
        A dictionary containing the results from running `cmd` with the
        following:

        * 'command': `cmd`
        * 'stdin': `cin` (If data was set in `cin`)
        * 'cwd': `cwd` (If `cwd` was set)
        * 'exception': exception message (If an exception was thrown)
        * 'timeout': `timeout` (If a timeout exception was thrown)
        * 'stdout': List from stdout (If any)
        * 'stderr': List from stderr (If any)
        * 'returncode': The returncode (If not any exception)
        * 'out': The most urgent message as joined string.
          ('exception' > 'stderr' > 'stdout')
    '''
    res = dict(command=cmd)
    if cin:
        cin = str(cin)
        res.update(dict(stdin=cin))
    if cwd:
        res.update(dict(cwd=cwd))
    if isinstance(cmd, str):
        # split a string command like a POSIX shell would (_split is shlex.split)
        cmd = _split(cmd)
    try:
        p = _Popen(cmd, stdin=_PIPE, stdout=_PIPE, stderr=_PIPE, bufsize=1, cwd=cwd, universal_newlines=True)
    except Exception as ex:
        # the process could not even be spawned (missing binary, bad cwd, ...)
        res.update(dict(exception=str(ex)))
    else:
        try:
            out, err = p.communicate(input=cin, timeout=timeout)
            # keep only non-empty lines of the captured output
            if out:
                res.update(dict(stdout=[o for o in out.split('\n') if o]))
            if err:
                res.update(dict(stderr=[e for e in err.split('\n') if e]))
            res.update(dict(returncode=p.returncode))
        except _TimeoutExpired as ex:
            # command exceeded `timeout` seconds -- record it and kill the child
            res.update(dict(exception=str(ex), timeout=timeout))
            p.kill()
        except Exception as ex:
            res.update(dict(exception=str(ex)))
    # 'out' carries the most urgent message: exception > stderr > stdout
    res.update(out=(res.get('exception') or '\n'.join(res.get('stderr') or res.get('stdout', ''))))
    if res.get('returncode', -1) != 0:
        # non-zero (or missing) returncode counts as failure; notify and
        # optionally tear the application down when `critical` is set
        res.update(dict(critical=critical))
        shell_notify('error in shell command \'%s\'' % (res.get('command')), state=True if critical else None, more=res, verbose=verbose)
    return res
def _akima_interpolate ( xi , yi , x , der = 0 , axis = 0 ) :
"""Convenience function for akima interpolation .
xi and yi are arrays of values used to approximate some function f ,
with ` ` yi = f ( xi ) ` ` .
See ` Akima1DInterpolator ` for details .
Parameters
xi : array _ like
A sorted list of x - coordinates , of length N .
yi : array _ like
A 1 - D array of real values . ` yi ` ' s length along the interpolation
axis must be equal to the length of ` xi ` . If N - D array , use axis
parameter to select correct axis .
x : scalar or array _ like
Of length M .
der : int or list , optional
How many derivatives to extract ; None for all potentially
nonzero derivatives ( that is a number equal to the number
of points ) , or a list of derivatives to extract . This number
includes the function value as 0th derivative .
axis : int , optional
Axis in the yi array corresponding to the x - coordinate values .
See Also
scipy . interpolate . Akima1DInterpolator
Returns
y : scalar or array _ like
The result , of length R or length M or M by R ,""" | from scipy import interpolate
try :
P = interpolate . Akima1DInterpolator ( xi , yi , axis = axis )
except TypeError : # Scipy earlier than 0.17.0 missing axis
P = interpolate . Akima1DInterpolator ( xi , yi )
if der == 0 :
return P ( x )
elif interpolate . _isscalar ( der ) :
return P ( x , der = der )
else :
return [ P ( x , nu ) for nu in der ] |
def files_write(self, path, file, offset=0, create=False, truncate=False, count=None, **kwargs):
    """Writes to a mutable file in the MFS.

    .. code-block:: python

        >>> c.files_write("/test/file", io.BytesIO(b"hi"), create=True)

    Parameters
    ----------
    path : str
        Filepath within the MFS
    file : io.RawIOBase
        IO stream object with data that should be written
    offset : int
        Byte offset at which to begin writing at
    create : bool
        Create the file if it does not exist
    truncate : bool
        Truncate the file to size zero before writing
    count : int
        Maximum number of bytes to read from the source ``file``
    """
    options = {"offset": offset, "create": create, "truncate": truncate}
    if count is not None:
        options["count"] = count
    # caller-supplied opts win over the ones assembled here
    kwargs.setdefault("opts", options)
    body, headers = multipart.stream_files(file, self.chunk_size)
    return self._client.request('/files/write', (path,), data=body, headers=headers, **kwargs)
def less_naive(gold_schemes):
    """Find the 'less naive' baseline: for every gold scheme, predict the
    most common scheme of the same length in the subcorpus.

    :param gold_schemes: iterable of schemes (each a sequence of labels)
    :return: list parallel to ``gold_schemes`` with each entry replaced by
        the most frequent scheme (as a tuple) of that scheme's length
    """
    from collections import Counter
    # Count occurrences of each scheme, grouped by scheme length.
    counts_by_length = defaultdict(Counter)
    for scheme in gold_schemes:
        counts_by_length[len(scheme)][tuple(scheme)] += 1
    # Most common scheme per length; most_common is stable, so ties are
    # broken by first occurrence, matching the previous max() behaviour.
    best_schemes = {
        length: counts.most_common(1)[0][0]
        for length, counts in counts_by_length.items()
    }
    return [best_schemes[len(scheme)] for scheme in gold_schemes]
def mkdir(self, href):
    """create remote folder

    :param href: remote path
    :return: response
    """
    # Retry up to TRYINGS times.  A ConnectionException aborts immediately;
    # any other exception is logged and the attempt is retried.
    for iTry in range(TRYINGS):
        logger.info(u("mkdir(%s): %s") % (iTry, href))
        try:
            href = remote(href)
            con = self.getConnection()
            # WebDAV MKCOL creates a collection (folder) at the target path
            con.request("MKCOL", _encode_utf8(href), "", self.getHeaders())
            response = con.getresponse()
            checkResponse(response)
            return response.read()
        except ConnectionException:
            raise
        except Exception:
            e = sys.exc_info()[1]
            logger.exception(e)
    # NOTE(review): if every attempt fails with a non-connection error, the
    # loop falls through and the method implicitly returns None instead of
    # raising -- confirm callers handle that (or that this should raise).
def get_command_arg_list(self, command_name: str, to_parse: Union[Statement, str], preserve_quotes: bool) -> Tuple[Statement, List[str]]:
    """Called by the argument_list and argparse wrappers to retrieve just the
    arguments being passed to their do_* methods as a list.

    :param command_name: name of the command being run
    :param to_parse: what is being passed to the do_* method. It can be one of
                     two types:
                     1. An already parsed Statement
                     2. An argument string in cases where a do_* method is
                        explicitly called, e.g. do_help('alias create') passes
                        'alias create'; the string is converted to a Statement
                        and returned along with the argument list.
    :param preserve_quotes: if True, then quotes will not be stripped from the
                            arguments
    :return: A tuple containing:
             The Statement used to retrieve the arguments
             The argument list
    """
    if not isinstance(to_parse, Statement):
        # A raw argument string was passed in; build a Statement from it.
        to_parse = self.parse(command_name + ' ' + to_parse, expand=False)
    arguments = to_parse.arg_list if preserve_quotes else to_parse.argv[1:]
    return to_parse, arguments
def root(path: Union[str, pathlib.Path]) -> _Root:
    """Retrieve a root directory object from a path.

    :param path: The path string or Path object.
    :return: The created root object.
    """
    normalised = _normalise_path(path)
    return _Root.from_path(normalised)
def lstled(x, n, array):
    """Given a number x and an array of non-decreasing floats
    find the index of the largest array element less than or equal to x.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstled_c.html

    :param x: Value to search against.
    :type x: float
    :param n: Number elements in array.
    :type n: int
    :param array: Array of possible lower bounds
    :type array: list
    :return: index of the last element of array that is less than or equal to x.
    :rtype: int
    """
    # marshal the Python values into the ctypes forms CSPICE expects
    c_array = stypes.toDoubleVector(array)
    c_x = ctypes.c_double(x)
    c_n = ctypes.c_int(n)
    return libspice.lstled_c(c_x, c_n, c_array)
def run(self):
    """Filter job callback."""
    from pyrocore import config
    try:
        config.engine.open()
        # TODO: select view into items
        matches = []
        self.run_filter(matches)
    except (error.LoggableError, xmlrpc.ERRORS) as err:
        self.LOG.warn(str(err))
def decouple(fn):
    """Inverse operation of couple.

    Create two functions of one argument and one return from a function that
    takes two arguments and has two returns

    Examples
    --------
    >>> h = lambda x: (2 * x ** 3, 6 * x ** 2)
    >>> f, g = decouple(h)
    >>> f(5)
    250
    >>> g(5)
    150
    """
    def projector(index):
        # Build a wrapper that forwards all arguments to fn and keeps only
        # the selected component of its two-element result.
        def component(*args, **kwargs):
            return fn(*args, **kwargs)[index]
        return component
    return projector(0), projector(1)
def inspecting_client_factory(self, host, port, ioloop_set_to):
    """Return an instance of :class:`ReplyWrappedInspectingClientAsync` or similar

    Provided to ease testing. Dynamically overriding this method after
    instantiation but before start() is called allows for deep brain surgery.
    See :class:`katcp.fake_clients.fake_inspecting_client_factory`
    """
    client = ReplyWrappedInspectingClientAsync(
        host,
        port,
        ioloop=ioloop_set_to,
        auto_reconnect=self.auto_reconnect,
    )
    return client
def _setChoice(self, s, strict=0):
    """Set choice parameter from string s"""
    raw_choices = _getChoice(s, strict)
    # coerce every parsed choice into this parameter's value type
    self.choice = [self._coerceValue(value) for value in raw_choices]
    self._setChoiceDict()
def _toolkits_select_columns(dataset, columns):
    """Same as select columns but redirect runtime error to ToolkitError."""
    try:
        return dataset.select_columns(columns)
    except RuntimeError:
        # report which requested columns are absent from the dataset
        available = set(dataset.column_names())
        missing_features = list(set(columns).difference(available))
        raise ToolkitError("Input data does not contain the following columns: " + "{}".format(missing_features))
def set_ntp_servers(primary_server=None, secondary_server=None, deploy=False):
    '''
    Set the NTP servers of the Palo Alto proxy minion. A commit will be
    required before this is processed.

    CLI Example:

    Args:
        primary_server(str): The primary NTP server IP address or FQDN.
        secondary_server(str): The secondary NTP server IP address or FQDN.
        deploy (bool): If true then commit the full candidate configuration,
            if false only set pending change.

    .. code-block:: bash

        salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org
        salt '*' ntp.set_servers primary_server=0.pool.ntp.org secondary_server=1.pool.ntp.org
        salt '*' ntp.ser_servers 0.pool.ntp.org 1.pool.ntp.org deploy=True
    '''
    ret = {}
    xpath_template = ('/config/devices/entry[@name=\'localhost.localdomain\']'
                      '/deviceconfig/system/ntp-servers/{0}-ntp-server')
    # issue one config-set call per server that was actually supplied
    for label, server in (('primary', primary_server), ('secondary', secondary_server)):
        if not server:
            continue
        query = {
            'type': 'config',
            'action': 'set',
            'xpath': xpath_template.format(label),
            'element': '<ntp-server-address>{0}</ntp-server-address>'.format(server),
        }
        ret.update({label + '_server': __proxy__['panos.call'](query)})
    if deploy is True:
        ret.update(commit())
    return ret
def clean_filename(s, minimal_change=False):
    """Sanitize a string to be used as a filename.

    If minimal_change is set to true, then we only strip the bare minimum of
    characters that are problematic for filesystems (namely, ':', '/' and
    '\\x00', '\\n').
    """
    # First, deal with HTML entities and URL encoded strings.
    try:
        from html import unescape  # Python 3.4+
    except ImportError:
        # HTMLParser.unescape was deprecated and removed in Python 3.9;
        # only fall back to it on interpreters without html.unescape.
        unescape = html_parser.HTMLParser().unescape
    s = unescape(s)
    s = unquote_plus(s)
    # Strip forbidden characters
    # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
    s = (s.replace(':', '-').replace('/', '-').replace('<', '-')
         .replace('>', '-').replace('"', '-').replace('\\', '-')
         .replace('|', '-').replace('?', '-').replace('*', '-')
         .replace('\x00', '-').replace('\n', ' '))
    # Remove trailing dots and spaces; forbidden on Windows
    s = s.rstrip(' .')
    if minimal_change:
        return s
    s = s.replace('(', '').replace(')', '')
    # Remove excess of trailing dots
    s = s.rstrip('.')
    # Replace inner spaces and keep only a conservative character whitelist
    s = s.strip().replace(' ', '_')
    valid_chars = '-_.()%s%s' % (string.ascii_letters, string.digits)
    return ''.join(c for c in s if c in valid_chars)
def copy_db(source_env, destination_env):
    """Copies Db betweem servers, ie develop to qa.

    Should be called by function from function defined in project fabfile.

    Example usage:

        def copy_db_between_servers(source_server, destination_server):
            source_env = {}
            destination_env = {}

            def populate_env_dict(server, local_env):
                app_dir = 'nutrimom'
                if server == 'nm-dev':
                    user = 'nutrimom-dev'
                    prefix = "dev"
                    environment = 'devel'
                    host_string = 'dev.arabel.la'
                elif server == 'nm-qa':
                    user = 'nutrimom-qa'
                    prefix = "qa"
                    environment = 'qa'
                    host_string = 'qa.arabel.la'
                elif server.startswith('nm-f'):
                    if server in ['nm-f1', 'nm-f2', 'nm-f3', 'nm-f4', 'nm-f5']:
                        user = 'nutrimom-' + server.split('-')[1]
                        prefix = user.split('-')[1]
                        environment = prefix
                        host_string = 'dev.arabel.la'
                else:
                    print("supported params: nm-dev, nm-qa, nm-fx")
                    sys.exit()
                local_env['app_dir'] = app_dir
                local_env['remote_user'] = user
                local_env['remote_path'] = '/home/%s/www/' % (user)
                local_env['dir'] = '/home/%s/Envs/%s' % (user, app_dir)
                local_env['python'] = '/home/%s/Envs/%s/bin/python' % (user, app_dir)
                local_env['pip'] = '/home/%s/Envs/%s/bin/pip' % (user, app_dir)
                local_env['prefix'] = prefix
                local_env['environment'] = environment
                local_env['host_string'] = host_string
                local_env['is_feature'] = False
                return local_env

            source_env = populate_env_dict(source_server, source_env)
            destination_env = populate_env_dict(destination_server, destination_env)
            copy_db(source_env, destination_env)
    """
    # Point fabric's env at the source server and dump its database locally.
    env.update(source_env)
    local_file_path = _get_db()
    # put the file on external file system
    # clean external db
    # load database into external file system
    env.update(destination_env)
    with cd(env.remote_path):
        sudo('mkdir -p backups', user=env.remote_user)
        # Safety dump of the destination DB before it is overwritten.
        # NOTE(review): this command runs without user=env.remote_user,
        # unlike the surrounding sudo calls -- confirm that is intentional.
        sudo(env.python + ' manage.py dump_database | gzip > backups/' + _sql_paths('local', datetime.now()))
        put(local_file_path, 'backups', use_sudo=True)
        sudo(env.python + ' manage.py clear_database', user=env.remote_user)
        if local_file_path.endswith('.gz'):
            # the path is the same here and there
            sudo('gzip -dc %s | %s manage.py dbshell' % (local_file_path, env.python), user=env.remote_user)
        else:
            sudo('%s manage.py dbshell < %s ' % (env.python, local_file_path), user=env.remote_user)
def _pullMessage ( self ) :
"""Call pull api with seq value to get message data .""" | data = { "msgs_recv" : 0 , "sticky_token" : self . _sticky , "sticky_pool" : self . _pool , "clientid" : self . _client_id , "state" : "active" if self . _markAlive else "offline" , }
return self . _get ( self . req_url . STICKY , data , fix_request = True , as_json = True ) |
def bootstrap_trajectories(trajs, correlation_length):
    """Generates a randomly resampled count matrix given the input coordinates.

    See API function for full documentation.

    :param trajs: a single trajectory (sequence of numbers) or a list of
        trajectories
    :param correlation_length: length of the resampled subtrajectories; any
        value < 1 means "use the longest trajectory length"
    :return: list of randomly selected subtrajectories whose total length is
        at least the total input length
    """
    from scipy.stats import rv_discrete
    # if we have just one trajectory, put it into a one-element list.
    # (the original check repeated ``int`` -- a leftover of the Python 2
    # ``(int, long, float)`` tuple)
    if isinstance(trajs[0], (int, float)):
        trajs = [trajs]
    ntraj = len(trajs)
    # determine correlation length to be used
    lengths = determine_lengths(trajs)
    Ltot = np.sum(lengths)
    Lmax = np.max(lengths)
    if correlation_length < 1:
        correlation_length = Lmax
    # assign probabilities to select trajectories, proportional to length
    w_trajs = np.zeros((len(trajs)))
    for i in range(ntraj):
        w_trajs[i] = len(trajs[i])
    w_trajs /= np.sum(w_trajs)  # normalize to sum 1.0
    distrib_trajs = rv_discrete(values=(list(range(ntraj)), w_trajs))
    # generate subtrajectories until at least Ltot frames are accumulated
    Laccum = 0
    subs = []
    while Laccum < Ltot:
        # pick a random trajectory (weighted by its length)
        itraj = distrib_trajs.rvs()
        # pick a starting frame
        t0 = random.randint(0, max(1, len(trajs[itraj]) - correlation_length))
        t1 = min(len(trajs[itraj]), t0 + correlation_length)
        # add new subtrajectory
        subs.append(trajs[itraj][t0:t1])
        # increment available states
        Laccum += (t1 - t0)
    return subs
def create_class_instance(element, element_id, doc_id):
    """given an Salt XML element, returns a corresponding `SaltElement` class
    instance, i.e. a SaltXML `SToken` node will be converted into a
    `TokenNode`.

    Parameters
    ----------
    element : lxml.etree._Element
        an `etree._Element` is the XML representation of a Salt element,
        e.g. a single 'nodes' or 'edges' element
    element_id : int
        the index of element (used to connect edges to nodes)
    doc_id : str
        the ID of the SaltXML document

    Returns
    -------
    salt_element : SaltElement
        an instance of a `SaltElement` subclass instance, e.g. a `TokenNode`,
        `TextualRelation` or `SaltLayer`
    """
    # look up the concrete SaltElement subclass for this element's xsi type
    xsi_type = get_xsi_type(element)
    element_class = XSI_TYPE_CLASSES[xsi_type]
    # NOTE(review): element_id and doc_id are currently unused here even
    # though the docstring says element_id "connects edges to nodes" --
    # confirm whether from_etree should receive them.
    return element_class.from_etree(element)
def jira_connection(config):
    """Gets a JIRA API connection. If a connection has already been created
    the existing connection will be returned.

    :param config: configuration mapping with a 'jira' section containing
        'url', 'username' and a base64-encoded 'password'
    :return: a cached, authenticated jira client instance
    """
    global _jira_connection
    if _jira_connection:
        return _jira_connection
    jira_options = {'server': config.get('jira').get('url')}
    cookies = configuration._get_cookies_as_dict()
    connection = jira_ext.JIRA(options=jira_options)
    session = connection._session
    reused_session = False
    if cookies:
        # try to reuse a previously saved session via its cookies
        requests.utils.add_dict_to_cookiejar(session.cookies, cookies)
        try:
            connection.session()
            reused_session = True
        except Exception:
            # stale/invalid cookies -- fall through to a fresh login
            pass
    if not reused_session:
        session.auth = (config['jira']['username'], base64.b64decode(config['jira']['password']))
        connection.session()
        session.auth = None
        cookie_jar_hash = requests.utils.dict_from_cookiejar(session.cookies)
        # .items() instead of the Python-2-only .iteritems(); it works on
        # both Python 2 and Python 3
        for key, value in cookie_jar_hash.items():
            configuration._save_cookie(key, value)
    _jira_connection = connection
    return _jira_connection
def validate(self, value):
    """Re-implements the orb.Column.validate method to verify that the
    reference model type that is used with this column instance is
    the type of value being provided.

    :param value: <variant>

    :return: <bool>
    """
    reference = self.referenceModel()
    if isinstance(value, orb.Model):
        # the record's schema must match the schema this column references
        expected = reference.schema().name()
        received = value.schema().name()
        if expected != received:
            raise orb.errors.InvalidReference(self.name(), expects=expected, received=received)
    return super(ReferenceColumn, self).validate(value)
def from_json_and_lambdas(cls, file: str, lambdas):
    """Builds a GrFN from a JSON object.

    Args:
        cls: The class variable for object creation.
        file: Filename of a GrFN JSON file.
        lambdas: Lambdas object accompanying the JSON definition.

    Returns:
        type: A GroundedFunctionNetwork object.
    """
    with open(file, "r") as json_file:
        payload = json.load(json_file)
    return cls.from_dict(payload, lambdas)
def get_variants(self, arch=None, types=None, recursive=False):
    """Return all variants of given arch and types.

    Supported variant types:
        self - include the top-level ("self") variant as well
        addon
        variant
        optional

    :param arch: only keep variants supporting this arch (the pseudo-arch
        "src" always matches); None disables the filter
    :param types: variant type names to keep; empty/None disables the filter
    :param recursive: also collect nested (non-"self") variants of every match
    :return: list of variants sorted by uid
    """
    types = types or []
    result = []
    if "self" in types:
        result.append(self)
    # plain dict.values() works on Python 2 and 3 alike -- no need for the
    # third-party six.itervalues indirection here
    for variant in self.variants.values():
        if types and variant.type not in types:
            continue
        if arch and arch not in variant.arches.union(["src"]):
            continue
        result.append(variant)
        if recursive:
            # "self" must not propagate into nested lookups
            result.extend(variant.get_variants(types=[i for i in types if i != "self"], recursive=True))
    result.sort(key=lambda x: x.uid)
    return result
def version(self):
    """Fetch version information from all plugins and store in the report
    version file"""
    lines = ["sosreport: %s" % __version__]
    lines.extend("%s: %s" % (name, plugin.version) for name, plugin in self.loaded_plugins)
    self.archive.add_string(content="\n".join(lines), dest='version.txt')
def mouseDoubleClickEvent(self, event):
    """Emits the node double clicked event when a node is double clicked.

    :param event | <QMouseDoubleClickEvent>
    """
    super(XNodeScene, self).mouseDoubleClickEvent(event)
    # emit the node double clicked signal
    if event.button() == Qt.LeftButton:
        item = self.itemAt(event.scenePos())
        if not item:
            self.clearSelection()
        else:
            # temporarily block signals so that re-selecting the item does
            # not fire selection-changed handlers mid-update; the previous
            # blocked state is restored afterwards
            blocked = self.signalsBlocked()
            self.blockSignals(True)
            self.clearSelection()
            item.setSelected(True)
            self.blockSignals(blocked)
            # only emit the typed double-click signals when signals were not
            # already blocked before this handler ran
            if isinstance(item, XNode) and not blocked:
                self.nodeDoubleClicked.emit(item)
            elif isinstance(item, XNodeConnection) and not blocked:
                self.connectionDoubleClicked.emit(item)
            if not blocked:
                self.itemDoubleClicked.emit(item)
def can(user, action, subject):
    """Checks if a given user has the ability to perform the action on a subject

    :param user: A user object
    :param action: an action string, typically 'read', 'edit', 'manage'.
        Use bouncer.constants for readability
    :param subject: the resource in question. Either a Class or an instance of
        a class. Pass the class if you want to know if the user has general
        access to perform the action on that type of object. Or pass a specific
        object, if you want to know if the user has the ability to that
        specific instance
    :returns: Boolean
    """
    authorization = get_authorization_method()
    ability = Ability(user, authorization)
    return ability.can(action, subject)
def read_anchors(ac, qorder, sorder, minsize=0):
    """anchors file are just (geneA, geneB) pairs (with possible deflines)"""
    all_anchors = defaultdict(list)
    anchor_to_block = {}
    anchor_count = 0
    for a, b, idx in ac.iter_pairs(minsize=minsize):
        # drop pairs whose genes are missing from either ordering
        if a not in qorder or b not in sorder:
            continue
        qi, q = qorder[a]
        si, s = sorder[b]
        pair = (qi, si)
        all_anchors[(q.seqid, s.seqid)].append(pair)
        anchor_to_block[pair] = idx
        anchor_count += 1
    logging.debug("A total of {0} anchors imported.".format(anchor_count))
    # every imported pair must be unique
    assert anchor_count == len(anchor_to_block)
    return all_anchors, anchor_to_block
def get_predicted_log_arr(self):
    '''getter'''
    arr = self.__predicted_log_arr
    # only a materialized ndarray is a valid value for this property
    if not isinstance(arr, np.ndarray):
        raise TypeError()
    return arr
def cut(self, start=None, stop=None, whence=0, version=None, include_ends=True, time_from_zero=False):
    """cut *MDF* file. *start* and *stop* limits are absolute values
    or values relative to the first timestamp depending on the *whence*
    argument.

    Parameters
    ----------
    start : float
        start time, default *None*. If *None* then the start of measurement
        is used
    stop : float
        stop time, default *None*. If *None* then the end of measurement is
        used
    whence : int
        how to search for the start and stop values

        * 0 : absolute
        * 1 : relative to first timestamp
    version : str
        new mdf file version from ('2.00', '2.10', '2.14', '3.00', '3.10',
        '3.20', '3.30', '4.00', '4.10', '4.11'); default *None* and in this
        case the original file version is used
    include_ends : bool
        include the *start* and *stop* timestamps after cutting the signal.
        If *start* and *stop* are found in the original timestamps, then
        the new samples will be computed using interpolation. Default *True*
    time_from_zero : bool
        start time stamps from 0s in the cut measurement

    Returns
    -------
    out : MDF
        new MDF object
    """
    if version is None:
        version = self.version
    else:
        version = validate_version_argument(version)
    out = MDF(version=version)
    if whence == 1:
        # relative limits: shift start/stop by the earliest timestamp found
        # across all groups
        timestamps = []
        for i, group in enumerate(self.groups):
            fragment = next(self._load_data(group))
            master = self.get_master(i, record_offset=0, record_count=1)
            if master.size:
                timestamps.append(master[0])
        self._master_channel_cache.clear()
        if timestamps:
            first_timestamp = np.amin(timestamps)
        else:
            first_timestamp = 0
        if start is not None:
            start += first_timestamp
        if stop is not None:
            stop += first_timestamp
    if time_from_zero:
        # shift all timestamps so the cut starts at 0s and move the header
        # start time forward accordingly
        delta = start
        t_epoch = self.header.start_time.timestamp() + delta
        out.header.start_time = datetime.fromtimestamp(t_epoch)
    else:
        delta = 0
        out.header.start_time = self.header.start_time
    groups_nr = len(self.groups)
    if self._callback:
        self._callback(0, groups_nr)
    cg_nr = -1
    interpolation_mode = self._integer_interpolation
    # walk through all groups and get all channels
    for i, group in enumerate(self.groups):
        included_channels = self._included_channels(i)
        if included_channels:
            cg_nr += 1
        else:
            continue
        data = self._load_data(group)
        # NOTE(review): `parents` is not used in this method; confirm that
        # _prepare_record is still needed here for its dtypes result only.
        parents, dtypes = self._prepare_record(group)
        idx = 0
        for fragment in data:
            if dtypes.itemsize:
                group.record = np.core.records.fromstring(fragment[0], dtype=dtypes)
            else:
                group.record = None
            master = self.get_master(i, fragment, copy_master=False)
            if not len(master):
                continue
            needs_cutting = True
            # check if this fragement is within the cut interval or
            # if the cut interval has ended
            if start is None and stop is None:
                # no limits -> keep the whole fragment
                fragment_start = None
                fragment_stop = None
                start_index = 0
                stop_index = len(master)
                needs_cutting = False
            elif start is None:
                fragment_start = None
                start_index = 0
                if master[0] > stop:
                    break
                else:
                    fragment_stop = min(stop, master[-1])
                    stop_index = np.searchsorted(master, fragment_stop, side="right")
                    if stop_index == len(master):
                        needs_cutting = False
            elif stop is None:
                fragment_stop = None
                if master[-1] < start:
                    continue
                else:
                    fragment_start = max(start, master[0])
                    start_index = np.searchsorted(master, fragment_start, side="left")
                    stop_index = len(master)
                    if start_index == 0:
                        needs_cutting = False
            else:
                if master[0] > stop:
                    break
                elif master[-1] < start:
                    continue
                else:
                    fragment_start = max(start, master[0])
                    start_index = np.searchsorted(master, fragment_start, side="left")
                    fragment_stop = min(stop, master[-1])
                    stop_index = np.searchsorted(master, fragment_stop, side="right")
                    if start_index == 0 and stop_index == len(master):
                        needs_cutting = False
            if needs_cutting:
                # common timebase of the cut fragment, shared by all channels
                cut_timebase = (Signal(master, master, name="_").cut(fragment_start, fragment_stop, include_ends, interpolation_mode=interpolation_mode).timestamps)
            # the first fragment triggers and append that will add the
            # metadata for all channels
            if idx == 0:
                sigs = []
                for j in included_channels:
                    sig = self.get(group=i, index=j, data=fragment, raw=True, ignore_invalidation_bits=True, copy_master=False)
                    if needs_cutting:
                        sig = sig.interp(cut_timebase, interpolation_mode=interpolation_mode)
                    # if sig.stream_sync and False:
                    #     attachment, _name = sig.attachment
                    #     duration = get_video_stream_duration(attachment)
                    #     if start is None:
                    #         start_t = 0
                    #     else:
                    #         start_t = start
                    #     if stop is None:
                    #         end_t = duration
                    #     else:
                    #         end_t = stop
                    #     attachment = cut_video_stream(
                    #         attachment,
                    #         start_t,
                    #         end_t,
                    #         Path(_name).suffix,
                    #     sig.attachment = attachment, _name
                    if not sig.samples.flags.writeable:
                        sig.samples = sig.samples.copy()
                    sigs.append(sig)
                if sigs:
                    if time_from_zero:
                        new_timestamps = cut_timebase - delta
                        for sig in sigs:
                            sig.timestamps = new_timestamps
                    if start:
                        start_ = f"{start}s"
                    else:
                        start_ = "start of measurement"
                    if stop:
                        stop_ = f"{stop}s"
                    else:
                        stop_ = "end of measurement"
                    out.append(sigs, f"Cut from {start_} to {stop_}", common_timebase=True)
                    try:
                        # preserve bus-event channel group metadata (MDF v4)
                        if group.channel_group.flags & v4c.FLAG_CG_BUS_EVENT:
                            out.groups[-1].channel_group.flags = group.channel_group.flags
                            out.groups[-1].channel_group.acq_name = group.channel_group.acq_name
                            out.groups[-1].channel_group.acq_source = group.channel_group.acq_source
                            out.groups[-1].channel_group.comment = group.channel_group.comment
                    except AttributeError:
                        pass
                else:
                    break
                idx += 1
            # the other fragments will trigger onl the extension of
            # samples records to the data block
            else:
                if needs_cutting:
                    timestamps = cut_timebase
                else:
                    timestamps = master
                if time_from_zero:
                    timestamps = timestamps - delta
                sigs = [(timestamps, None)]
                for j in included_channels:
                    sig = self.get(group=i, index=j, data=fragment, raw=True, samples_only=True, ignore_invalidation_bits=True)
                    if needs_cutting:
                        _sig = Signal(sig[0], master, name="_", invalidation_bits=sig[1]).interp(cut_timebase, interpolation_mode=interpolation_mode)
                        sig = (_sig.samples, _sig.invalidation_bits)
                        del _sig
                    sigs.append(sig)
                if sigs:
                    out.extend(cg_nr, sigs)
                idx += 1
            group.record = None
        # if the cut interval is not found in the measurement
        # then append an empty data group
        if idx == 0:
            self.configure(read_fragment_size=1)
            sigs = []
            fragment = next(self._load_data(group))
            fragment = (fragment[0], -1, None)
            for j in included_channels:
                sig = self.get(group=i, index=j, data=fragment, raw=True, ignore_invalidation_bits=True)
                # keep channel metadata but empty the sample/timestamp arrays
                sig.samples = sig.samples[:0]
                sig.timestamps = sig.timestamps[:0]
                sigs.append(sig)
            if start:
                start_ = f"{start}s"
            else:
                start_ = "start of measurement"
            if stop:
                stop_ = f"{stop}s"
            else:
                stop_ = "end of measurement"
            out.append(sigs, f"Cut from {start_} to {stop_}", common_timebase=True)
            self.configure(read_fragment_size=0)
        if self._callback:
            self._callback(i + 1, groups_nr)
        if self._terminate:
            return
    out._transfer_events(self)
    if self._callback:
        out._callback = out._mdf._callback = self._callback
    return out
def set_power(self, state):
    """Set the power state of the smart plug.

    Devices that report nightlight support encode on/off as 3/2 rather
    than the plain 1/0 values.

    :param state: truthy to turn the plug on, falsy to turn it off.
    """
    payload = bytearray(16)
    payload[0] = 2
    if self.check_nightlight():
        on_value, off_value = 3, 2
    else:
        on_value, off_value = 1, 0
    payload[4] = on_value if state else off_value
    self.send_packet(0x6a, payload)
def make_venv(self, dj_version):
    """Create (or reuse) a virtual environment for a given Django version.

    :param str dj_version: Django version string, e.g. ``'2.2'``
    :rtype: str
    :return: path to the created virtual env
    """
    path = self._get_venv_path(dj_version)
    self.logger.info('Creating virtual environment for Django %s ...' % dj_version)
    try:
        create_venv(path, **VENV_CREATE_KWARGS)
    except ValueError:
        # create_venv signals an already-existing target directory this way.
        self.logger.warning('Virtual environment directory already exists. Skipped.')
    self.venv_install('django==%s' % dj_version, path)
    return path
def setRecord(self, record):
    """Set the record instance edited by this widget and populate the
    child column editors from its values.

    :param record: <orb.Table> instance or None to clear.
    """
    self._record = record
    if not record:
        return
    self.setModel(record.__class__)
    schema = self.model().schema()
    # Push each column value into the matching editor child widget.
    for widget in self.findChildren(XOrbColumnEdit):
        column_name = widget.columnName()
        column = schema.column(column_name)
        if not column:
            logger.warning('%s is not a valid column of %s.' % (column_name, schema.name()))
            continue
        widget.setValue(record.recordValue(column_name))
def get_device_state(self, device, id_override=None, type_override=None):
    """Fetch a device's current state via the online API.

    Args:
        device (WinkDevice): The device the state is requested for.
        id_override (String, optional): Overrides the device's own ID,
            e.g. to address a sub-device (outlet in a powerstrip) through
            its parent device's ID.
        type_override (String, optional): Overrides the device type used
            to build the endpoint URL.

    Returns:
        Dict: The API's JSON response, decoded.
    """
    _LOGGER.info("Getting state via online API")
    target_id = id_override or device.object_id()
    target_type = type_override or device.object_type()
    endpoint = "{}/{}s/{}".format(self.BASE_URL, target_type, target_id)
    response = requests.get(endpoint, headers=API_HEADERS)
    payload = response.json()
    _LOGGER.debug('%s', payload)
    return payload
def mul(lhs: Any, rhs: Any, default: Any = RaiseTypeErrorIfNotProvided) -> Any:
    """Return ``lhs * rhs``, or ``default`` when multiplication is undefined.

    Mostly used by ``__pow__`` methods that want to return NotImplemented
    instead of causing a TypeError.

    Args:
        lhs: Left hand side of the multiplication.
        rhs: Right hand side of the multiplication.
        default: Value returned when the product is undefined. When no
            default is given, a TypeError is raised instead.

    Returns:
        The product of the two inputs, or else the default value if the
        product is not defined, or else raises a TypeError if no default
        is defined.

    Raises:
        TypeError: neither ``lhs.__mul__`` nor ``rhs.__rmul__`` produced a
            result AND no default value was specified.
    """
    result = NotImplemented
    # First try the left operand's __mul__.
    forward = getattr(lhs, '__mul__', None)
    if forward is not None:
        result = forward(rhs)
    # Fall back to the right operand's __rmul__.
    if result is NotImplemented:
        reverse = getattr(rhs, '__rmul__', None)
        if reverse is not None:
            result = reverse(lhs)
    # Collapse factors of exactly +/-1 against parameterized (sympy) values
    # so trivial products are not built up.
    if lhs == 1 and is_parameterized(rhs):
        result = rhs
    if rhs == 1 and is_parameterized(lhs):
        result = lhs
    if lhs == -1 and is_parameterized(rhs):
        result = -rhs
    if rhs == -1 and is_parameterized(lhs):
        result = -lhs
    if result is not NotImplemented:
        return result
    if default is not RaiseTypeErrorIfNotProvided:
        return default
    raise TypeError("unsupported operand type(s) for *: '{}' and '{}'".format(type(lhs), type(rhs)))
def parse_entry(self, row):
    """Parse one VCF row and return a :class:`VCFEntry` describing the
    call (alternative allele, zygosity, etc.)."""
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    return entry
def mc_sample_path(P, init=0, sample_size=1000, random_state=None):
    """Generate one sample path from the Markov chain represented by the
    (n x n) transition matrix P on state space S = {0, ..., n-1}.

    Parameters
    ----------
    P : array_like(float, ndim=2)
        A Markov transition matrix.
    init : array_like(float, ndim=1) or scalar(int), optional(default=0)
        An array_like is treated as the initial distribution across
        states; a scalar is treated as the deterministic initial state.
    sample_size : scalar(int), optional(default=1000)
        The length of the sample path.
    random_state : int or np.random.RandomState, optional
        Random seed or RandomState instance for reproducibility. If None,
        a randomly initialized RandomState is used.

    Returns
    -------
    X : array_like(int, ndim=1)
        The simulation of states.
    """
    random_state = check_random_state(random_state)
    if isinstance(init, numbers.Integral):
        initial_state = init
    else:
        # init is a distribution: draw the starting state by inverting
        # its CDF at a uniform sample.
        initial_state = searchsorted(np.cumsum(init), random_state.random_sample())
    return MarkovChain(P).simulate(ts_length=sample_size, init=initial_state, random_state=random_state)
def build_joblist(jobgraph):
    """Return a list of unique jobs collected from the passed jobgraph."""
    collected = set()
    for root in jobgraph:
        collected = populate_jobset(root, collected, depth=1)
    return list(collected)
def _install_hiero(use_threaded_wrapper):
    """Set up The Foundry Hiero support.

    :param use_threaded_wrapper: forwarded to ``_common_setup``; selects
        whether calls are routed through the threaded wrapper.
    :raises ImportError: when Nuke was not launched in Hiero mode.
    """
    import hiero
    import nuke
    # Hiero mode is detected from Nuke's raw command-line arguments.
    if "--hiero" not in nuke.rawArgs:
        raise ImportError

    def threaded_wrapper(func, *args, **kwargs):
        # Hiero requires host calls to run on its main thread.
        return hiero.core.executeInMainThreadWithResult(func, args, kwargs)

    _common_setup("Hiero", threaded_wrapper, use_threaded_wrapper)
def aveknt(t, k):
    """Return the running average of ``k`` successive elements of ``t``.

    Parameters:
        t: Python list or rank-1 array.
        k: int, >= 2, how many successive elements to average.

    Returns:
        rank-1 array of averaged data. If ``k > len(t)``, returns a
        zero-length array.

    Caveat:
        This is slightly different from MATLAB's aveknt, which returns the
        running average of ``k - 1`` successive elements of ``t[1:-1]``
        (and the empty vector if ``len(t) - 2 < k - 1``).
    """
    t = np.atleast_1d(t)
    if t.ndim > 1:
        raise ValueError("t must be a list or a rank-1 array")
    n = t.shape[0]
    # Number of full windows of length k, hence the output length.
    windows = max(0, n - (k - 1))
    out = np.empty((windows,), dtype=t.dtype)
    for start in range(windows):
        out[start] = sum(t[start:start + k]) / k
    return out
def urls(self):
    """Build the list of API endpoint URL patterns.

    NOTE: only for django as of now.
    NOTE: urlpatterns are deprecated since Django 1.8

    :return list: urls
    """
    from django.conf.urls import url
    # Root documentation view plus the resource-map view.
    urls = [url(r'^$', self.documentation), url(r'^map$', self.map_view), ]
    # For every registered resource: a list route and a detail route
    # accepting one or more comma-separated ids.
    for resource_name in self.resource_map:
        urls.extend([url(r'(?P<resource_name>{})$'.format(resource_name), self.handler_view), url(r'(?P<resource_name>{})/(?P<ids>[\w\-\,]+)$'.format(resource_name), self.handler_view), ])
    return urls
def pid(self):
    """The wrapped process' PID.

    Supports both pexpect-style objects (which expose the real process
    as ``.proc``) and plain ``subprocess`` objects.
    """
    _missing = object()
    proc = getattr(self.subprocess, 'proc', _missing)
    if proc is not _missing:
        # pexpect compatibility path.
        return proc.pid
    # Standard subprocess path.
    return self.subprocess.pid
def writeln(self, text, fg='black', bg='white'):
    '''Write ``text`` to the console followed by a linefeed.'''
    text = text if isinstance(text, str) else str(text)
    self.write(text + '\n', fg=fg, bg=bg)
def _pop_colors_and_alpha(glyphclass, kwargs, prefix="", default_alpha=1.0):
    """Pop prefixed color and alpha fields out of ``kwargs`` and return a
    dict of the resolved values, filling in defaults where absent.

    Only fields that the glyph class actually declares are produced.
    """
    # TODO: the need to do this, and the complexity of managing it
    # throughout the codebase, suggests a real stylesheet class where
    # defaults can be declared instead of this imperative logic.
    result = {}
    color = kwargs.pop(prefix + "color", get_default_color())
    for field in ("fill_color", "line_color"):
        if field in glyphclass.properties():
            result[field] = kwargs.pop(prefix + field, color)
    # NOTE: text fill color should really always default to black; hard
    # coded here until a stylesheet solution exists.
    if "text_color" in glyphclass.properties():
        result["text_color"] = kwargs.pop(prefix + "text_color", "black")
    alpha = kwargs.pop(prefix + "alpha", default_alpha)
    for field in ("fill_alpha", "line_alpha", "text_alpha"):
        if field in glyphclass.properties():
            result[field] = kwargs.pop(prefix + field, alpha)
    return result
def create_config(cls, params, prefix=None, name=None):
    """Create a new :class:`.Config` container.

    Invoked during initialisation; overrides defaults with ``params`` and
    applies the ``prefix`` to non-global settings.
    """
    if isinstance(cls.cfg, Config):
        # Already a Config: copy with the new name/prefix applied.
        cfg = cls.cfg.copy(name=name, prefix=prefix)
    else:
        cfg = cls.cfg.copy()
        # NOTE(review): these store the name/prefix values under keys equal
        # to the strings themselves (cfg[name], cfg[prefix]) rather than
        # under literal 'name'/'prefix' keys — looks suspicious; confirm
        # against Config's expected constructor kwargs.
        if name:
            cfg[name] = name
        if prefix:
            cfg[prefix] = prefix
        cfg = Config(**cfg)
    cfg.update_settings()
    cfg.update(params, True)
    return cfg
def bracket_level(text, open={'(', '[', '{'}, close={')', ']', '}'}):
    """Return the net bracket nesting level of ``text``.

    0 means the brackets are balanced (or absent); positive counts
    unmatched opening brackets, negative counts unmatched closing ones.
    """
    return sum((c in open) - (c in close) for c in text)
def add_hash_memo(self, memo_hash):
    """Set the memo for the transaction to a new :class:`HashMemo
    <stellar_base.memo.HashMemo>`.

    :param memo_hash: A 32 byte hash or hex encoded string to use as the memo.
    :type memo_hash: bytes, str
    :return: This builder instance.
    """
    # HashMemo wraps/validates the raw value before it is attached.
    memo_hash = memo.HashMemo(memo_hash)
    return self.add_memo(memo_hash)
def create_chebyshev_samples(order, dim=1):
    """Chebyshev sampling function.

    Args:
        order (int):
            The number of samples to create along each axis.
        dim (int):
            The number of dimensions to create samples for.

    Returns:
        samples following Chebyshev sampling scheme mapped to the
        ``[0, 1]^dim`` hyper-cube and ``shape == (dim, order)``.
    """
    # 1-D Chebyshev nodes cos(k*pi/(order+1)) on [-1, 1], affinely
    # mapped onto [0, 1].
    x_data = .5 * numpy.cos(numpy.arange(order, 0, -1) * numpy.pi / (order + 1)) + .5
    # Tensor-product combination of the 1-D nodes across all dimensions.
    x_data = chaospy.quad.combine([x_data] * dim)
    return x_data.T
def tokens_for_completion(self, line: str, begidx: int, endidx: int) -> Tuple[List[str], List[str]]:
    """Used by tab completion functions to get all tokens through the one being completed.

    :param line: the current input line with leading whitespace removed
    :param begidx: the beginning index of the prefix text
    :param endidx: the ending index of the prefix text
    :return: A 2 item tuple where the items are

             On Success
                 tokens: list of unquoted tokens;
                     this is generally the list needed for tab completion functions
                 raw_tokens: list of tokens with any quotes preserved;
                     this can be used to know if a token was quoted or is missing a closing quote

                 Both lists are guaranteed to have at least 1 item.
                 The last item in both lists is the token being tab completed.

             On Failure
                 Two empty lists
    """
    import copy
    unclosed_quote = ''
    quotes_to_try = copy.copy(constants.QUOTES)
    tmp_line = line[:endidx]
    tmp_endidx = endidx
    # Parse the line into tokens, retrying with a synthetic closing quote
    # when the token under the cursor has an unterminated quote.
    while True:
        try:
            initial_tokens = shlex_split(tmp_line[:tmp_endidx])
            # If the cursor is at an empty token outside of a quoted string,
            # then that is the token being completed. Add it to the list.
            if not unclosed_quote and begidx == tmp_endidx:
                initial_tokens.append('')
            break
        except ValueError as ex:
            # Make sure the exception was due to an unclosed quote and
            # we haven't exhausted the closing quotes to try
            if str(ex) == "No closing quotation" and quotes_to_try:
                # Add a closing quote and try to parse again
                unclosed_quote = quotes_to_try[0]
                quotes_to_try = quotes_to_try[1:]
                tmp_line = line[:endidx]
                tmp_line += unclosed_quote
                tmp_endidx = endidx + 1
            else:
                # The parsing error is not caused by unclosed quotes.
                # Return empty lists since this means the line is malformed.
                return [], []
    if self.allow_redirection:
        # Since redirection is enabled, we need to treat redirection characters (|, <, >)
        # as word breaks when they are in unquoted strings. Go through each token
        # and further split them on these characters. Each run of redirect characters
        # is treated as a single token.
        raw_tokens = []
        for cur_initial_token in initial_tokens:
            # Save tokens up to 1 character in length or quoted tokens. No need to parse these.
            if len(cur_initial_token) <= 1 or cur_initial_token[0] in constants.QUOTES:
                raw_tokens.append(cur_initial_token)
                continue
            # Iterate over each character in this token
            cur_index = 0
            cur_char = cur_initial_token[cur_index]
            # Keep track of the token we are building
            cur_raw_token = ''
            while True:
                if cur_char not in constants.REDIRECTION_CHARS:
                    # Keep appending to cur_raw_token until we hit a redirect char
                    while cur_char not in constants.REDIRECTION_CHARS:
                        cur_raw_token += cur_char
                        cur_index += 1
                        if cur_index < len(cur_initial_token):
                            cur_char = cur_initial_token[cur_index]
                        else:
                            break
                else:
                    redirect_char = cur_char
                    # Keep appending to cur_raw_token until we hit something other than redirect_char
                    while cur_char == redirect_char:
                        cur_raw_token += cur_char
                        cur_index += 1
                        if cur_index < len(cur_initial_token):
                            cur_char = cur_initial_token[cur_index]
                        else:
                            break
                # Save the current token
                raw_tokens.append(cur_raw_token)
                cur_raw_token = ''
                # Check if we've viewed all characters
                if cur_index >= len(cur_initial_token):
                    break
    else:
        raw_tokens = initial_tokens
    # Save the unquoted tokens
    tokens = [utils.strip_quotes(cur_token) for cur_token in raw_tokens]
    # If the token being completed had an unclosed quote, we need
    # to remove the closing quote that was added in order for it
    # to match what was on the command line.
    if unclosed_quote:
        raw_tokens[-1] = raw_tokens[-1][:-1]
    return tokens, raw_tokens
def using(_other, **kwargs):
    """Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)
    if _other is this:
        # Re-use the *current* lexer class for the sub-lexing.
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            # Shift the sub-lexer's offsets to absolute positions in the match.
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
def parse_metadata_string(metadata_string):
    """Extract the STOP_DATE (end time) value from raw metadata text."""
    pattern = re.compile(r"STOP_DATE.+?VALUE\s*=\s*\"(.+?)\"", re.DOTALL)
    return pattern.search(metadata_string).group(1)
def leaves(self, nodes=None, unique=True):
    """Get the leaves of the tree starting at this root.

    Args:
        nodes (iterable): limit leaves to these node names
        unique: only include individual leaf nodes once

    Returns:
        list of leaf nodes
    """
    if nodes is None:
        # No filter requested: the base class walks the whole tree.
        return super(DependencyTree, self).leaves(unique=unique)
    res = list()
    for child_id in nodes:
        for sub_child in self._all_nodes[child_id].leaves(unique=unique):
            # Honour `unique` across the combined result, not just per node.
            if not unique or sub_child not in res:
                res.append(sub_child)
    return res
def serial(self):
    """Return True when the CI server should run in serial mode.

    The ``SERIAL`` property may be a bool or a string such as ``"true"``;
    strings are compared case-insensitively.
    """
    value = self.property_get("SERIAL", False)
    if not isinstance(value, str):
        return value
    return value.lower() == "true"
def require_component_access(view_func, component):
    """Perform a component ``can_access`` check before running the view.

    :param component: component containing the view (panel or dashboard).

    Raises a :exc:`~horizon.exceptions.NotAuthorized` exception if the
    user cannot access the component containing the view. For example,
    the component's policy rules will be applied to its views.
    """
    from horizon.exceptions import NotAuthorized

    # Preserve the wrapped view's metadata for introspection/debugging.
    @functools.wraps(view_func, assigned=available_attrs(view_func))
    def dec(request, *args, **kwargs):
        if not component.can_access({'request': request}):
            raise NotAuthorized(_("You are not authorized to access %s") % request.path)
        return view_func(request, *args, **kwargs)
    return dec
def make_route(self, dst, gw=None, dev=None):
    """Internal function: create a route for 'dst' via 'gw'."""
    # Split "prefix/plen"; a missing prefix length defaults to /128
    # (an IPv6 host route).
    prefix, plen = (dst.split("/") + ["128"])[:2]
    plen = int(plen)
    if gw is None:
        gw = "::"
    if dev is None:
        # Resolve the output device and source address from the gateway.
        dev, ifaddr, x = self.route(gw)
    else:
        # TODO: do better than that
        # replace that unique address by the list of all addresses
        lifaddr = in6_getifaddr()
        # filter(lambda x: x[2] == dev, lifaddr)
        devaddrs = [i for i in lifaddr if i[2] == dev]
        ifaddr = construct_source_candidate_set(prefix, plen, devaddrs, LOOPBACK_NAME)
    return (prefix, plen, gw, dev, ifaddr)
def watch_zone(self, zone_id):
    """Add a zone to the watchlist.

    Zones on the watchlist will push all state changes (and those of the
    source they are currently connected to) back to the client.
    """
    r = yield from self._send_cmd("WATCH %s ON" % (zone_id.device_str(),))
    # Track the zone locally so callers can see what is being watched.
    self._watched_zones.add(zone_id)
    return r
def NewFromHsl(h, s, l, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specified HSL values.

    Parameters:
      :h: The Hue component value [0...1]
      :s: The Saturation component value [0...1]
      :l: The Lightness component value [0...1]
      :alpha: The color transparency [0...1], default is opaque
      :wref: The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> Color.NewFromHsl(30, 1, 0.5)
    (1.0, 0.5, 0.0, 1.0)
    >>> Color.NewFromHsl(30, 1, 0.5, 0.5)
    (1.0, 0.5, 0.0, 0.5)'''
    return Color((h, s, l), 'hsl', alpha, wref)
def display(port=None, height=None):
    """Display a TensorBoard instance already running on this machine.

    Args:
      port: The port on which the TensorBoard server is listening, as an
        `int`, or `None` to automatically select the most recently
        launched TensorBoard.
      height: The height of the frame into which to render the TensorBoard
        UI, as an `int` number of pixels, or `None` to use a default value
        (currently 800).
    """
    # Delegate to the shared helper; prints the URL instead of returning
    # a display handle.
    _display(port=port, height=height, print_message=True, display_handle=None)
def get_snapshot(self, entity_id, lt=None, lte=None):
    """Return the entity's most recent snapshot, optionally bounded by a
    version number (``lt``/``lte``); None when no snapshot exists.

    :rtype: Snapshot
    """
    found = self.snapshot_store.get_domain_events(entity_id, lt=lt, lte=lte, limit=1, is_ascending=False)
    if found:
        return found[0]
def load_credential_file(self, path):
    """Load a credential file laid out like the Java AWS utilities.

    The Java-style ``AWSAccessKeyId``/``AWSSecretKey`` keys are rewritten
    to the boto-style names and fed to the config parser under a synthetic
    ``[Credentials]`` section header.

    :param path: path to the Java-style credential file.
    """
    # `StringIO.StringIO` is Python-2-only; this file already uses
    # f-strings elsewhere, so use the Python-3 `io.StringIO` instead.
    import io
    c_data = io.StringIO()
    c_data.write("[Credentials]\n")
    # Close the source file deterministically (the original leaked the
    # handle returned by open()).
    with open(path, "r") as fp:
        for line in fp:
            c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
    c_data.seek(0)
    self.readfp(c_data)
def _parse_number_from_substring(smoothie_substring):
    '''Return the numeric portion of an axis token such as "N:12.3", where
    "N" is the axis letter and "12.3" is a floating point position for
    that axis, rounded to GCODE_ROUNDING_PRECISION decimal places.'''
    try:
        return round(float(smoothie_substring.split(':')[1]), GCODE_ROUNDING_PRECISION)
    except (ValueError, IndexError, TypeError, AttributeError):
        # Any malformed token (missing colon, non-numeric value, None
        # input, ...) is logged and surfaced as a ParseError carrying the
        # offending input.
        log.exception('Unexpected argument to _parse_number_from_substring:')
        raise ParseError('Unexpected argument to _parse_number_from_substring: {}'.format(smoothie_substring))
def _set_hyperparameters ( self , parameters ) :
"""Set internal optimization parameters .""" | for name , value in parameters . iteritems ( ) :
try :
getattr ( self , name )
except AttributeError :
raise ValueError ( 'Each parameter in parameters must be an attribute. ' '{} is not.' . format ( name ) )
setattr ( self , name , value ) |
def get_posix(self, i):
    """Parse a POSIX character class (e.g. ``[:alpha:]`` or ``[:^alpha:]``)
    at the iterator's current position.

    Returns the normalized class text, or None (rewinding the iterator to
    where it started) when the input is not a valid POSIX class.
    """
    index = i.index
    value = ['[']
    try:
        c = next(i)
        if c != ':':
            raise ValueError('Not a valid property!')
        else:
            value.append(c)
        c = next(i)
        # Optional leading '^' negates the class.
        if c == '^':
            value.append(c)
            c = next(i)
        # Consume the property name up to the closing ':'.
        while c != ':':
            if c not in _PROPERTY:
                raise ValueError('Not a valid property!')
            if c not in _PROPERTY_STRIP:
                value.append(c)
            c = next(i)
        value.append(c)
        c = next(i)
        if c != ']' or not value:
            raise ValueError('Unmatched ]')
        value.append(c)
    except Exception:
        # On any failure (including StopIteration from a truncated input),
        # rewind to the starting position and report "no class here".
        i.rewind(i.index - index)
        value = []
    return ''.join(value) if value else None
def fetch_partial(self, container, obj, size):
    """Return the first 'size' bytes of an object.

    If the object is smaller than the specified 'size' value, the entire
    object is returned.
    """
    # Thin delegation to the underlying storage manager.
    return self._manager.fetch_partial(container, obj, size)
async def build_task_dependencies(chain, task, name, my_task_id):
    """Recursively build the task dependencies of a task.

    Args:
        chain (ChainOfTrust): the chain of trust to add to.
        task (dict): the task definition to operate on.
        name (str): the name of the task to operate on.
        my_task_id (str): the taskId of the task to operate on.

    Raises:
        CoTError: on failure.
    """
    log.info("build_task_dependencies {} {}".format(name, my_task_id))
    # The dependency name encodes the recursion path with ':' separators;
    # bounding its depth guards against runaway recursion.
    if name.count(':') > chain.context.config['max_chain_length']:
        raise CoTError("Too deep recursion!\n{}".format(name))
    sorted_dependencies = find_sorted_task_dependencies(task, name, my_task_id)
    for task_name, task_id in sorted_dependencies:
        # Skip dependencies already linked into the chain.
        if task_id not in chain.dependent_task_ids():
            link = LinkOfTrust(chain.context, task_name, task_id)
            json_path = link.get_artifact_full_path('task.json')
            try:
                task_defn = await chain.context.queue.task(task_id)
                link.task = task_defn
                chain.links.append(link)
                # write task json to disk
                makedirs(os.path.dirname(json_path))
                with open(json_path, 'w') as fh:
                    fh.write(format_json(task_defn))
                # Recurse into this dependency's own dependencies.
                await build_task_dependencies(chain, task_defn, task_name, task_id)
            except TaskclusterFailure as exc:
                raise CoTError(str(exc))
def canAdd(self, filename):
    """Determine whether a file can be added to the depot under the
    current client.

    :param filename: File path to add
    :type filename: str
    :return: True when a preview ``add`` reports the file is addable.
    :rtype: bool
    """
    try:
        # Preview-only add (-n): asks the server without changing anything.
        result = self.run(['add', '-n', '-t', 'text', filename])[0]
    except errors.CommandError as err:
        LOGGER.debug(err)
        return False
    if result.get('code') not in ('error', 'info'):
        return True
    # Logger.warn is a deprecated alias; use warning().
    LOGGER.warning('Unable to add {}: {}'.format(filename, result['data']))
    return False
def __parseThunkData(self, thunk, importSection):
    """Parse a thunk's AddressOfData and populate either its ordinal or
    its import-by-name data."""
    offset = to_offset(thunk.header.AddressOfData, importSection)
    # Top nibble == 8 selects import-by-ordinal — presumably the 32-bit
    # IMAGE_ORDINAL_FLAG; TODO confirm against the PE spec.
    if 0xf0000000 & thunk.header.AddressOfData == 0x80000000:
        thunk.ordinal = thunk.header.AddressOfData & 0x0fffffff
    else:
        # Import by name: a 2-byte Hint followed by the symbol name string.
        ibn = IMAGE_IMPORT_BY_NAME.from_buffer(importSection.raw, offset)
        checkOffset(offset + 2, importSection)
        name = get_str(importSection.raw, offset + 2)
        thunk.importByName = ImportByNameData(header=ibn, hint=ibn.Hint, name=name)
def add_new_observations(self, y, exogenous=None, **kwargs):
    """Update the endog/exog samples after a model fit.

    After fitting your model and creating forecasts, you're going to need
    to attach new samples to the data you fit on. These are used to
    compute new forecasts (but using the same estimated parameters).

    Parameters
    ----------
    y : array-like or iterable, shape=(n_samples,)
        The time-series data to add to the endogenous samples on which
        the ``ARIMA`` estimator was previously fit. This may either be a
        Pandas ``Series`` object or a numpy array. This should be a
        one-dimensional array of finite floats.

    exogenous : array-like, shape=[n_obs, n_vars], optional (default=None)
        An optional 2-d array of exogenous variables. If the model was
        fit with an exogenous array of covariates, it will be required
        for updating the observed values.

    **kwargs : keyword args
        Any keyword args that should be passed as ``**fit_kwargs`` in the
        new model fit.
    """
    # Delegates to `update`, which performs the actual sample refresh.
    return self.update(y, exogenous, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.