signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _shrink_list(self, shrink):
    """Shrink a list down to its essentials.

    :param shrink: List to shrink
    :type shrink: list
    :return: Shrunk list (or, for a single-element list, the shrunk
        element itself rather than a list)
    :rtype: list
    """
    if len(shrink) == 1:
        return self.shrink(shrink[0])
    # Shrink every element and keep only the truthy results.
    return [reduced for reduced in (self.shrink(item) for item in shrink) if reduced]
def _connected(self, link_uri):
    """Callback called from the Crazyflie API when a Crazyflie
    has been connected and the TOCs have been downloaded.

    :param link_uri: URI of the link that was connected.
    """
    print('Connected to %s' % link_uri)
    # The definition of the logconfig can be made before connecting
    self._lg_stab = LogConfig(name='Stabilizer', period_in_ms=10)
    self._lg_stab.add_variable('stabilizer.roll', 'float')
    self._lg_stab.add_variable('stabilizer.pitch', 'float')
    self._lg_stab.add_variable('stabilizer.yaw', 'float')
    # Adding the configuration cannot be done until a Crazyflie is
    # connected, since we need to check that the variables we
    # would like to log are in the TOC.
    try:
        self._cf.log.add_config(self._lg_stab)
        # This callback will receive the data
        self._lg_stab.data_received_cb.add_callback(self._stab_log_data)
        # This callback will be called on errors
        self._lg_stab.error_cb.add_callback(self._stab_log_error)
        # Start the logging
        self._lg_stab.start()
    except KeyError as e:
        print('Could not start log configuration,'
              '{} not found in TOC'.format(str(e)))
    except AttributeError:
        print('Could not add Stabilizer log config, bad configuration.')
    # Start a timer to close the link.  NOTE(review): the original
    # comment said "disconnect in 10s" but the timer fires after 5
    # seconds — confirm which duration is intended.
    t = Timer(5, self._cf.close_link)
    t.start()
def get_order(self, codes):
    """Return evidence codes sorted into the order given by code2name."""
    def _rank(code):
        # NOTE: a code missing from ev2idx yields [None], which raises
        # TypeError when compared against integer ranks in Python 3.
        return [self.ev2idx.get(code)]
    return sorted(codes, key=_rank)
def copy_config(self, original, new):
    '''Copy a collection config into a new folder.

    Can be used to create new collections based on existing configs.
    Basically, copies all nodes under /configs/original to /configs/new.

    :param original str: ZK name of original config
    :param new str: New name of the ZK config.
    '''
    base = '/configs/{}'.format(original)
    if not self.kz.exists(base):
        raise ZookeeperError(
            "Collection doesn't exist in Zookeeper. Current Collections are: {}".format(
                self.kz.get_children('/configs')))
    self._copy_dir(base, '/configs/{}'.format(new))
def submit(self, txn, timeout=None):
    """Submit a transaction.

    Processes multiple requests in a single transaction.  A transaction
    increments the revision of the key-value store and generates events
    with the same revision for every completed request.  It is not
    allowed to modify the same key several times within one transaction.

    Modelled on the MultiOp primitive from Google's paxosdb paper: a
    guard (list of tests on database entries) is evaluated atomically;
    if every test passes the "t op" list of operations is executed,
    otherwise the "f op" list is executed.

    :param txn: The transaction to submit.
    :type txn: instance of :class:`txaioetcd.Transaction`
    :param timeout: Request timeout in seconds (defaults to 10).
    :type timeout: int
    :returns: An instance of :class:`txaioetcd.Success` or an exception
        of :class:`txaioetcd.Failed` or :class:`txaioetcd.Error`
    :rtype: instance of :class:`txaioetcd.Success`,
        :class:`txaioetcd.Failed` or :class:`txaioetcd.Error`
    """
    # Bug fix: the original ignored `timeout` and always passed a
    # hard-coded 10 to the database function.
    if timeout is None:
        timeout = 10

    def run(pg_txn):
        # Marshal the transaction to JSON and hand it to the stored proc.
        val = Json(txn._marshal())
        pg_txn.execute("SELECT pgetcd.submit(%s,%s)", (val, timeout))
        rows = pg_txn.fetchall()
        return "{0}".format(rows[0][0])

    return self._pool.runInteraction(run)
def insert_colorpoint(self, position=0.5, color1=None, color2=None):
    """Insert the specified color pair into the colorpoint list.

    :param position: Normalized position in [0, 1]; values at or outside
        the range are pushed to the end points.
    :param color1: RGB triple (defaults to [1.0, 1.0, 0.0]).
    :param color2: RGB triple (defaults to [1.0, 1.0, 0.0]).
    :return: self (fluent interface)
    """
    # Bug fix: the original used mutable default arguments, so the same
    # list object was shared across calls.
    if color1 is None:
        color1 = [1.0, 1.0, 0.0]
    if color2 is None:
        color2 = [1.0, 1.0, 0.0]
    L = self._colorpoint_list
    # If position <= 0 or >= 1, push the end points inward.
    if position <= 0.0:
        L.insert(0, [0.0, color1, color2])
    elif position >= 1.0:
        L.append([1.0, color1, color2])
    else:
        # Find the first existing point whose position is >= the new one.
        # Bug fix: the original iterated range(len(L)) and read L[n + 1],
        # raising IndexError when position was beyond the last point
        # (and silently inserting nothing for an empty list).
        for n in range(len(L) - 1):
            if position <= L[n + 1][0]:
                L.insert(n + 1, [position, color1, color2])
                break
        else:
            L.append([position, color1, color2])
    # Update the image with the new cmap.
    self.update_image()
    return self
def pixels_to_tiles(self, coords, clamp=True):
    """Convert pixel coordinates into tile coordinates.

    :param coords: indexable (x, y) pair of pixel coordinates.
    :param clamp: if True, clamp the result to tiles that exist on the
        tilemap (via ``clamp_within_range``).
    :return: Vector2 holding the tile coordinates.
    """
    tile_coords = Vector2()
    # NOTE(review): '/' is true division in Python 3, so these produce
    # float tile indices; floor division '//' may have been intended —
    # confirm against callers.
    tile_coords.X = int(coords[0]) / self.spritesheet[0].width
    tile_coords.Y = int(coords[1]) / self.spritesheet[0].height
    if clamp:
        tile_coords.X, tile_coords.Y = self.clamp_within_range(tile_coords.X, tile_coords.Y)
    return tile_coords
def post_data(self, job_id, body, params=None):
    """`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-post-data.html>`_

    :arg job_id: The name of the job receiving the data
    :arg body: The data to process
    :arg reset_end: Optional parameter to specify the end of the bucket
        resetting range
    :arg reset_start: Optional parameter to specify the start of the bucket
        resetting range
    """
    # Both path components are required; reject empty values up front.
    if job_id in SKIP_IN_PATH or body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument.")
    path = _make_path("_ml", "anomaly_detectors", job_id, "_data")
    return self.transport.perform_request(
        "POST", path, params=params, body=self._bulk_body(body))
def move(self, path, raise_if_exists=False):
    """Alias for ``rename()``; delegates with identical arguments."""
    self.rename(path, raise_if_exists=raise_if_exists)
def aromatize(molecule, usedPyroles=None):
    """(molecule, usedPyroles=None) -> aromatize a molecular graph.

    usedPyroles is a dictionary that holds the pyrole-like atoms that
    are used in the conversion process.  The following valence checker
    may need this information.
    """
    pyroleLike = getPyroleLikeAtoms(molecule)
    if usedPyroles is None:
        usedPyroles = {}
    cyclesToCheck = []
    # Determine which cycles came in marked as aromatic and which need
    # to be checked from the kekule form.  If a cycle came in as
    # aromatic, convert it before going on.
    for cycle in molecule.cycles:
        for atom in cycle.atoms:
            if not atom.aromatic:
                # At least one non-aromatic atom: defer to the loop below.
                cyclesToCheck.append(cycle)
                break
        else:
            # Every atom was flagged aromatic: convert immediately.
            if not convert(cycle, pyroleLike, usedPyroles):  # XXX FIX ME
                # Oops, an aromatic ring came in but we can't convert it.
                # This is an error; daylight would conjugate the ring.
                raise PinkyError("Bad initial aromaticity")
    # Keep checking rings until nothing changes (fixed-point iteration).
    while 1:
        # Assume nothing happened this pass.
        needToCheckAgain = 0
        _cyclesToCheck = []
        for cycle in cyclesToCheck:
            canAromatic = canBeAromatic(cycle, pyroleLike)
            if canAromatic == NEVER:
                # The ring can NEVER EVER be aromatic, so remove it for good.
                pass
            elif canAromatic and convert(cycle, pyroleLike, usedPyroles):
                # A conversion happened; other rings may now qualify.
                needToCheckAgain = 1
            else:
                # Still undecided; revisit on the next pass.
                _cyclesToCheck.append(cycle)
        cyclesToCheck = _cyclesToCheck
        if not needToCheckAgain:
            break
    # Fix bonds that have no bondorder if necessary.
    molecule = fixBonds(molecule, pyroleLike)
    # Add implicit hydrogens.
    return addHydrogens(molecule, usedPyroles)
def scale(table):
    """Scale a table based on the column with the largest sum.

    Each column is multiplied by (max column sum / its own sum) so that
    every column sums to the same total as the largest one.

    Bug fix: the original *filtered out* zero-sum columns when building
    the factor list, shifting every later column's factor one slot left
    (and truncating rows in the zip).  Zero-sum columns now get a
    neutral factor of 1.0 and are left unchanged.

    :param table: rectangular iterable of numeric rows
    :return: new table as a list of lists of floats
    """
    columns = list(zip(*table))
    sums = [float(sum(col)) for col in columns]
    scale_to = max(sums)
    # One factor per column; zero-sum columns are left as-is.
    factors = [scale_to / s if s != 0 else 1.0 for s in sums]
    return [[value * f for value, f in zip(row, factors)] for row in table]
def cache_file(source):
    '''Wrapper for cp.cache_file which raises an error if the file was unable to
    be cached.

    CLI Example:

    .. code-block:: bash

        salt myminion container_resource.cache_file salt://foo/bar/baz.txt
    '''
    try:
        # Don't just use cp.cache_file for this. Docker has its own code to
        # pull down images from the web.
        if source.startswith('salt://'):
            cached = __salt__['cp.cache_file'](source)
            if not cached:
                raise CommandExecutionError('Unable to cache {0}'.format(source))
            return cached
    except AttributeError:
        # `source` was not string-like.
        raise SaltInvocationError('Invalid source file {0}'.format(source))
    # Non-salt:// sources are returned untouched.
    return source
def get_as_datetime(self, key):
    """Convert a map element into a datetime, or return the current
    date if conversion is not possible.

    :param key: an index of the element to get.
    :return: datetime value of the element, or the current date if
        conversion is not supported.
    """
    return DateTimeConverter.to_datetime(self.get(key))
def addJunctionPos(shape, fromPos, toPos):
    """Return *shape* extended with the given positions wherever they
    differ from the existing endpoints.

    Assumes that shape and positions have the same dimensionality.
    """
    extended = list(shape)
    if shape[0] != fromPos:
        extended.insert(0, fromPos)
    if shape[-1] != toPos:
        extended = extended + [toPos]
    return extended
def save_xml(self, doc, element):
    '''Save this component into an xml.dom.Element object.

    :param doc: owning xml.dom Document, used to create child elements.
    :param element: Element populated with this component's attributes
        and child elements.
    '''
    element.setAttributeNS(XSI_NS, XSI_NS_S + 'type', 'rtsExt:component_ext')
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'id', self.id)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'pathUri', self.path_uri)
    # Optional attribute: only written when set.
    if self.active_configuration_set:
        element.setAttributeNS(RTS_NS, RTS_NS_S + 'activeConfigurationSet', self.active_configuration_set)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'instanceName', self.instance_name)
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'compositeType', comp_type.to_string(self.composite_type))
    # Booleans are serialised as lowercase strings ('true'/'false').
    element.setAttributeNS(RTS_NS, RTS_NS_S + 'isRequired', str(self.is_required).lower())
    if self.comment:
        element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'comment', self.comment)
    element.setAttributeNS(RTS_EXT_NS, RTS_EXT_NS_S + 'visible', str(self.visible).lower())
    # Each child collection is serialised as its own element type and
    # delegated to the child's save_xml.
    for port in self.data_ports:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'DataPorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for port in self.service_ports:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'ServicePorts')
        port.save_xml(doc, new_element)
        element.appendChild(new_element)
    for cs in self.configuration_sets:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'ConfigurationSets')
        cs.save_xml(doc, new_element)
        element.appendChild(new_element)
    for ec in self.execution_contexts:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'ExecutionContexts')
        ec.save_xml(doc, new_element)
        element.appendChild(new_element)
    for p in self.participants:
        new_element = doc.createElementNS(RTS_NS, RTS_NS_S + 'Participants')
        p.save_xml(doc, new_element)
        element.appendChild(new_element)
    # Location is a single, always-written child element.
    new_element = doc.createElementNS(RTS_EXT_NS, RTS_EXT_NS_S + 'Location')
    self._location.save_xml(doc, new_element)
    element.appendChild(new_element)
    for p in self.properties:
        new_prop_element = doc.createElementNS(RTS_EXT_NS, RTS_EXT_NS_S + 'Properties')
        properties_to_xml(new_prop_element, p, self.properties[p])
        element.appendChild(new_prop_element)
def find_line(scihdu, refhdu):
    """Obtain bin factors and corner location to extract
    and bin the appropriate subset of a reference image to
    match a science image.

    If the science image has zero offset and is the same size and
    binning as the reference image, ``same_size`` will be set to
    `True`. Otherwise, the values of ``rx``, ``ry``, ``x0``, and
    ``y0`` will be assigned.

    Normally the science image will be binned the same or more
    than the reference image. In that case, ``rx`` and ``ry``
    will be the bin size of the science image divided by the
    bin size of the reference image.

    If the binning of the reference image is greater than the
    binning of the science image, the ratios (``rx`` and ``ry``)
    of the bin sizes will be the reference image size divided by
    the science image bin size. This is not necessarily an error.

    .. note:: Translated from ``calacs/lib/findbin.c``.

    Parameters
    ----------
    scihdu, refhdu : obj
        Extension HDU's of the science and reference image,
        respectively.

    Returns
    -------
    same_size : bool
        `True` if zero offset and same size and binning.

    rx, ry : int
        Ratio of bin sizes.

    x0, y0 : int
        Location of start of subimage in reference image.

    Raises
    ------
    ValueError
        Science and reference data size mismatch.
    """
    sci_bin, sci_corner = get_corner(scihdu.header)
    ref_bin, ref_corner = get_corner(refhdu.header)
    # We can use the reference image directly, without binning
    # and without extracting a subset.
    # NOTE(review): only axis 1 of the data shapes is compared here —
    # confirm axis-0 equality is guaranteed by the caller.
    if (sci_corner[0] == ref_corner[0] and sci_corner[1] == ref_corner[1] and
            sci_bin[0] == ref_bin[0] and sci_bin[1] == ref_bin[1] and
            scihdu.data.shape[1] == refhdu.data.shape[1]):
        same_size = True
        rx = 1
        ry = 1
        x0 = 0
        y0 = 0
    # Reference image is binned more than the science image.
    elif ref_bin[0] > sci_bin[0] or ref_bin[1] > sci_bin[1]:
        same_size = False
        # NOTE(review): '/' is true division in Python 3, so rx/ry may be
        # floats here even though the docstring says int (only x0/y0 are
        # cast below) — confirm against calacs/lib/findbin.c.
        rx = ref_bin[0] / sci_bin[0]
        ry = ref_bin[1] / sci_bin[1]
        x0 = (sci_corner[0] - ref_corner[0]) / ref_bin[0]
        y0 = (sci_corner[1] - ref_corner[1]) / ref_bin[1]
    # For subarray input images, whether they are binned or not.
    else:
        same_size = False
        # Ratio of bin sizes.
        ratiox = sci_bin[0] / ref_bin[0]
        ratioy = sci_bin[1] / ref_bin[1]
        # The ratios must be exact multiples of the reference binning.
        if (ratiox * ref_bin[0] != sci_bin[0] or
                ratioy * ref_bin[1] != sci_bin[1]):
            raise ValueError('Science and reference data size mismatch')
        # cshift is the offset in units of unbinned pixels.
        # Divide by ref_bin to convert to units of pixels in the ref image.
        cshift = (sci_corner[0] - ref_corner[0], sci_corner[1] - ref_corner[1])
        xzero = cshift[0] / ref_bin[0]
        yzero = cshift[1] / ref_bin[1]
        if (xzero * ref_bin[0] != cshift[0] or
                yzero * ref_bin[1] != cshift[1]):
            warnings.warn('Subimage offset not divisible by bin size',
                          AstropyUserWarning)
        rx = ratiox
        ry = ratioy
        x0 = xzero
        y0 = yzero
    # Ensure integer index
    x0 = int(x0)
    y0 = int(y0)
    return same_size, rx, ry, x0, y0
def show_code(co, version, file=None):
    """Print details of methods, functions, or code to *file*.

    If *file* is not provided, the output is printed on stdout.
    """
    info = code_info(co, version)
    if file is None:
        print(info)
    else:
        file.write(info + '\n')
def run(self):
    """Main thread function.

    Pulls audio frames from ``self.queue`` and feeds them to a pygame
    mixer channel until ``self.keep_listening`` is cleared, then stops
    the channel and shuts down the mixer.
    """
    if not hasattr(self, 'queue'):
        raise RuntimeError("Audio queue is not intialized.")
    chunk = None
    channel = None
    self.keep_listening = True
    while self.keep_listening:
        if chunk is None:
            try:
                frame = self.queue.get(timeout=queue_timeout)
                chunk = pygame.sndarray.make_sound(frame)
            except Empty:
                # No frame available within the timeout; poll again.
                continue
        if channel is None:
            # First chunk: start playback and keep the channel handle.
            channel = chunk.play()
        else:
            # Queue the next chunk only if the channel's one queue slot
            # is free.
            if not channel.get_queue():
                channel.queue(chunk)
        # NOTE(review): the original indentation of this reset was
        # ambiguous in the source dump; clearing the chunk here drops a
        # frame when the channel's queue slot is busy — confirm against
        # the upstream project.
        chunk = None
        time.sleep(0.005)
    if not channel is None and pygame.mixer.get_init():
        channel.stop()
    pygame.mixer.quit()
def _load_plugin_class(menu, name):
    """Load a Custodia plugin.

    Entry points are preferred over a dotted import path.
    """
    group = 'custodia.{}'.format(menu)
    eps = list(pkg_resources.iter_entry_points(group, name))
    if len(eps) > 1:
        raise ValueError("Multiple entry points for {} {}: {}".format(menu, name, eps))
    if len(eps) == 1:
        ep = eps[0]
        # Backwards compatibility with old setuptools: 'resolve' may be
        # missing, in which case fall back to load().
        if hasattr(ep, 'resolve'):
            return ep.resolve()
        return ep.load(require=False)
    if '.' in name:
        # Fall back to old-style dotted name.
        module_name, class_name = name.rsplit('.', 1)
        mod = importlib.import_module(module_name)
        return getattr(mod, class_name)
    raise ValueError("{}: {} not found".format(menu, name))
def assert_is_lat(val):
    """Check that the given value is a feasible decimal latitude.

    :param val: value to be checked
    :type val: int or float
    :returns: `None`
    :raises: *ValueError* if value is out of latitude boundaries,
        *AssertionError* if type is wrong
    """
    # Exact type check (bool deliberately excluded), as in the
    # original contract.
    assert type(val) in (float, int), "Value must be a number"
    if not -90.0 <= val <= 90.0:
        raise ValueError("Latitude value must be between -90 and 90")
def set_connection(self, url):
    """Set the connection URL to the address a Neo4j server is set up at."""
    parsed = urlparse(url)
    has_credentials = parsed.netloc.find('@') > -1
    bolt_scheme = parsed.scheme == 'bolt' or parsed.scheme == 'bolt+routing'
    if not (has_credentials and bolt_scheme):
        raise ValueError("Expecting url format: bolt://user:password@localhost:7687"
                         " got {0}".format(url))
    credentials, hostname = parsed.netloc.rsplit('@', 1)
    username, password = credentials.split(':')
    self.driver = GraphDatabase.driver(
        parsed.scheme + '://' + hostname,
        auth=basic_auth(username, password),
        encrypted=config.ENCRYPTED_CONNECTION,
        max_pool_size=config.MAX_POOL_SIZE)
    self.url = url
    # Track the owning process so forked workers can detect a stale driver.
    self._pid = os.getpid()
    self._active_transaction = None
def get_data_for_sensors(macs=None, search_duratio_sec=5, bt_device=''):
    """Get latest data for sensors in the MACs list.

    Args:
        macs (array): MAC addresses (default: no filter).
        search_duratio_sec (int): Search duration in seconds. Default 5.
            (Parameter name kept, typo and all, for backward compatibility.)
        bt_device (string): Bluetooth device id

    Returns:
        dict: MAC and state of found sensors
    """
    # Bug fix: the original used a mutable default argument (macs=[]).
    if macs is None:
        macs = []
    log.info('Get latest data for sensors. Stop with Ctrl+C.')
    log.info('Stops automatically in %ss', search_duratio_sec)
    log.info('MACs: %s', macs)
    # Keep only the latest reading per MAC.
    return {new_data[0]: new_data[1]
            for new_data in RuuviTagSensor._get_ruuvitag_datas(
                macs, search_duratio_sec, bt_device=bt_device)}
async def import_wallet(config: str, credentials: str, import_config_json: str) -> None:
    """Create a new secure wallet with the given unique name and then
    import its content according to fields provided in import_config.

    This can be seen as an indy_create_wallet call with additional
    content import.

    :param config: Wallet configuration json.
        "id": string, Identifier of the wallet.
            Configured storage uses this identifier to lookup exact wallet data placement.
        "storage_type": optional<string>, Type of the wallet storage. Defaults to 'default'.
            'Default' storage type allows to store wallet data in the local file.
            Custom storage types can be registered with indy_register_wallet_storage call.
        "storage_config": optional<object>, Storage configuration json. Storage type defines set of supported keys.
            Can be optional if storage supports default configuration.
            For 'default' storage type configuration is:
                "path": optional<string>, Path to the directory with wallet files.
                    Defaults to $HOME/.indy_client/wallet.
                    Wallet will be stored in the file {path}/{id}/sqlite.db
    :param credentials: Wallet credentials json
        "key": string, Key or passphrase used for wallet key derivation.
            Look to key_derivation_method param for information about supported key derivation methods.
        "storage_credentials": optional<object> Credentials for wallet storage. Storage type defines set of supported keys.
            Can be optional if storage supports default configuration.
            For 'default' storage type should be empty.
        "key_derivation_method": optional<string> Algorithm to use for wallet key derivation:
            ARGON2I_MOD - derive secured wallet master key (used by default)
            ARGON2I_INT - derive secured wallet master key (less secured but faster)
            RAW - raw wallet key master provided (skip derivation).
                RAW keys can be generated with generate_wallet_key call
    :param import_config_json: JSON containing settings for input operation: {
        "path": path of the file that contains exported wallet content
        "key": key used for export of the wallet
        }
    :return: Error code
    """
    logger = logging.getLogger(__name__)
    logger.debug("import_wallet: >>> config: %r, credentials: %r, import_config_json: %r",
                 config, credentials, import_config_json)
    # Create the C completion callback only once and cache it on the
    # function object so repeated calls reuse the same callback.
    if not hasattr(import_wallet, "cb"):
        logger.debug("import_wallet: Creating callback")
        import_wallet.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
    # Marshal the Python strings into C char pointers for the FFI call.
    c_config = c_char_p(config.encode('utf-8'))
    c_credentials = c_char_p(credentials.encode('utf-8'))
    c_import_config_json = c_char_p(import_config_json.encode('utf-8'))
    await do_call('indy_import_wallet', c_config, c_credentials, c_import_config_json, import_wallet.cb)
    logger.debug("import_wallet: <<<")
def PortPathMatcher(cls, port_path):
    """Return a device matcher for the given port path.

    :param port_path: list of ints, or a sysfs path string which is
        converted to one.
    """
    if isinstance(port_path, str):
        # Convert from sysfs path to port_path.
        port_path = [int(piece) for piece in SYSFS_PORT_SPLIT_RE.split(port_path)]
    expected = port_path
    return lambda device: device.port_path == expected
def apply_calibration(df, calibration_df, calibration):
    '''Apply calibration values from `fit_fb_calibration` result to `calibration`
    object.

    :param df: measurement frame; its 'C' column is updated in place.
    :param calibration_df: fitted per-feedback-resistor values
        ('fb_resistor', 'R_fb', 'C_fb' columns).
    :param calibration: calibration object whose R_fb/C_fb entries are
        overwritten.
    '''
    from dmf_control_board_firmware import FeedbackResults
    # Copy the fitted feedback resistor/capacitor values onto the
    # calibration object, indexed by resistor number.
    for i, (fb_resistor, R_fb, C_fb) in calibration_df[['fb_resistor', 'R_fb', 'C_fb']].iterrows():
        calibration.R_fb[int(fb_resistor)] = R_fb
        calibration.C_fb[int(fb_resistor)] = C_fb
    cleaned_df = df.dropna()
    grouped = cleaned_df.groupby(['frequency', 'test_capacitor', 'repeat_index'])
    # NOTE(review): the loop variable is named 'channel' but the second
    # group key is 'test_capacitor' — confirm the intended naming.
    for (f, channel, repeat_index), group in grouped:
        r = FeedbackResults(group.V_actuation.iloc[0], f, 5.0,
                            group.V_hv.values, group.hv_resistor.values,
                            group.V_fb.values, group.fb_resistor.values,
                            calibration)
        # Update the measured capacitance values based on the updated
        # calibration model.
        df.loc[group.index, 'C'] = r.capacitance()
def index(self, sub, start=None, end=None):
    """Like S.find() but raise ValueError when the substring is not found.

    :param str sub: Substring to search.
    :param int start: Beginning position.
    :param int end: Stop comparison at this position.
    """
    # Search the color-stripped text so indexes match the visible string.
    plain = self.value_no_colors
    return plain.index(sub, start, end)
def crude_stretch(self, min_stretch=None, max_stretch=None):
    """Perform simple linear stretching.

    This is done without any cutoff on the current image and normalizes to
    the [0,1] range.

    :param min_stretch: lower bound(s); computed per band from the data
        when not given.
    :param max_stretch: upper bound(s); computed per band from the data
        when not given.
    """
    if min_stretch is None:
        # Reduce over every dimension except 'bands' -> one min per band.
        non_band_dims = tuple(x for x in self.data.dims if x != 'bands')
        min_stretch = self.data.min(dim=non_band_dims)
    if max_stretch is None:
        non_band_dims = tuple(x for x in self.data.dims if x != 'bands')
        max_stretch = self.data.max(dim=non_band_dims)
    # Allow per-band bounds given as plain sequences.
    if isinstance(min_stretch, (list, tuple)):
        min_stretch = self.xrify_tuples(min_stretch)
    if isinstance(max_stretch, (list, tuple)):
        max_stretch = self.xrify_tuples(max_stretch)
    delta = (max_stretch - min_stretch)
    if isinstance(delta, xr.DataArray):
        # fillna if delta is NaN
        scale_factor = (1.0 / delta).fillna(0)
    else:
        scale_factor = 1.0 / delta
    # In-place arithmetic can drop attrs; save and restore them.
    attrs = self.data.attrs
    self.data -= min_stretch
    self.data *= scale_factor
    self.data.attrs = attrs
def GetCoinAssets(self):
    """Get asset ids of all coins present in the wallet.

    Returns:
        list: of UInt256 asset id's (deduplicated).
    """
    return list({coin.Output.AssetId for coin in self.GetCoins()})
def datetime2literal_rnc(d: datetime.datetime, c: Optional[Dict]) -> str:
    """Format a DateTime object as something MySQL will actually accept."""
    # strftime("%Y-%m-%d %H:%M:%S") can fail for years before 1900 on
    # some Python versions; isoformat with a space separator avoids that.
    # http://stackoverflow.com/questions/10263956
    # noinspection PyArgumentList
    return _mysql.string_literal(d.isoformat(" "), c)
def make_auto_deployable(self, stage, swagger=None):
    """Sets up the resource such that it will trigger a re-deployment
    when the Swagger definition changes.

    :param stage: Stage resource whose deployment reference is updated.
    :param swagger: Dictionary containing the Swagger definition of the API.
    """
    if not swagger:
        return
    # CloudFormation does NOT redeploy the API unless it has a new deployment
    # resource that points to the latest RestApi resource. Append a hash of
    # the Swagger body so we redeploy only when the API data changes.
    # NOTE: `str(swagger)` is for backwards compatibility. Changing it to a
    # JSON or something will break compat.
    generator = logical_id_generator.LogicalIdGenerator(self.logical_id, str(swagger))
    self.logical_id = generator.gen()
    # NOTE(review): the original comment claimed "first 10 characters of the
    # hash is good enough" but length=40 (the full hash) is requested here —
    # confirm which is intended.
    hash = generator.get_hash(length=40)  # Get the full hash
    self.Description = "RestApi deployment id: {}".format(hash)
    stage.update_deployment_ref(self.logical_id)
def angle(self, other):
    """Return the angle (in radians) between this vector and *other*.

    :param other: vector providing ``dot``-compatible components.
    :return: angle in [0, pi].
    :raises ZeroDivisionError: if either vector has zero magnitude.
    """
    cos_theta = self.dot(other) / (self.magnitude() * other.magnitude())
    # Bug fix: floating-point rounding can push the ratio slightly
    # outside [-1, 1], making math.acos raise ValueError for (nearly)
    # parallel vectors.  Clamp to the valid domain.
    return math.acos(max(-1.0, min(1.0, cos_theta)))
def finalize(self, album_cache, image_cache, warn_node):
    """Update attributes after Sphinx cache is updated.

    :param dict album_cache: Cache of Imgur albums to read. Keys are Imgur IDs, values are Album instances.
    :param dict image_cache: Cache of Imgur images to read. Keys are Imgur IDs, values are Image instances.
    :param function warn_node: sphinx.environment.BuildEnvironment.warn_node without needing node argument.
    """
    album = album_cache[self.imgur_id] if self.album else None
    # For an album, the representative image is its cover.
    image = image_cache[album.cover_id] if self.album else image_cache[self.imgur_id]
    options = self.options
    # Determine target. Code in directives.py handles defaults and unsets target_* if :target: is set.
    if options['target_gallery'] and (album.in_gallery if album else image.in_gallery):
        options['target'] = '//imgur.com/gallery/{}'.format(album.imgur_id if album else image.imgur_id)
    elif options['target_page']:
        options['target'] = '//imgur.com/{}'.format(album.imgur_id if album else image.imgur_id)
    elif options['target_largest'] and not album:
        options['target'] = '//i.imgur.com/' + image.filename(full_size=True)
    elif not options['target'] and (options['width'] or options['height'] or options['scale']):
        # Resized inline images link to the full-size original by default.
        options['target'] = '//i.imgur.com/' + image.filename(full_size=True)
    # Handle scale with no API data.
    if options['scale']:
        if not image.width and not options['width'] and not image.height and not options['height']:
            options['scale'] = ''
            warn_node('Could not obtain image size. :scale: option is ignored.')
        elif not image.width and not options['width']:
            warn_node('Could not obtain image width. :scale: option is partially ignored.')
        elif not image.width or not image.height:
            warn_node('Could not obtain image height. :scale: option is partially ignored.')
    # Handle scale, width, and height.
    if options['scale'] and (options['width'] or image.width):
        match = RE_WIDTH_HEIGHT.match(options['width'] or '%dpx' % image.width)
        options['width'] = '{}{}'.format(int(float(match.group(1)) * (options['scale'] / 100.0)), match.group(2))
    if options['scale'] and (options['height'] or image.height):
        match = RE_WIDTH_HEIGHT.match(options['height'] or '%dpx' % image.height)
        options['height'] = '{}{}'.format(int(float(match.group(1)) * (options['scale'] / 100.0)), match.group(2))
    # Set src and style.
    self.src = '//i.imgur.com/' + image.filename(options['width'], options['height'])
    style = [p for p in ((k, options[k]) for k in ('width', 'height')) if p[1]]
    if style:
        self.style = '; '.join('{}: {}'.format(k, v) for k, v in style)
    # Determine alt text: fall back to the image title, then the src
    # path without its leading '//'.
    if not options['alt']:
        options['alt'] = image.title or self.src[2:]
def import_generated_autoboto_module(self, name):
    """Import a module from the generated autoboto package in the build
    directory (not target_dir).

    For example, to import autoboto.services.s3.shapes, call:
        botogen.import_generated_autoboto_module("services.s3.shapes")
    """
    build_dir = str(self.config.build_dir)
    # Make the build directory importable exactly once.
    if build_dir not in sys.path:
        sys.path.append(build_dir)
    return importlib.import_module(f"{self.config.target_package}.{name}")
def _GetStat(self):
    """Retrieves information about the file entry.

    Returns:
        VFSStat: a stat object populated from the underlying pyfsapfs
        file entry.
    """
    stat_object = super(APFSFileEntry, self)._GetStat()
    # File data stat information.
    stat_object.size = self._fsapfs_file_entry.size
    # Ownership and permissions stat information.
    # Mask off the file-type bits, keeping only the permission bits.
    stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
    stat_object.uid = self._fsapfs_file_entry.owner_identifier
    stat_object.gid = self._fsapfs_file_entry.group_identifier
    # File entry type stat information.
    stat_object.type = self.entry_type
    # Other stat information.
    stat_object.ino = self._fsapfs_file_entry.identifier
    stat_object.fs_type = 'APFS'
    # Entries reachable through this file entry are always allocated.
    stat_object.is_allocated = True
    return stat_object
def setKUS(self, K, U, S):
    """setKUS(CLMM self, MatrixXd const & K, MatrixXd const & U, VectorXd const & S)

    SWIG-generated wrapper delegating to the native C++ implementation.

    Parameters
    ----------
    K : MatrixXd const &
    U : MatrixXd const &
    S : VectorXd const &
    """
    return _core.CLMM_setKUS(self, K, U, S)
def get_multiplicon_segments(self, value):
    """Return a dictionary describing the genome segments that
    contribute to the named multiplicon, keyed by genome, with
    (start feature, end feature) tuples.

    :param value: multiplicon identifier (converted to str for the query)
    :return: collections.defaultdict mapping genome -> (start, end);
        missing genomes read back as ().
    """
    sql = '''SELECT genome, first, last FROM segments
             WHERE multiplicon=:mp'''
    cur = self._dbconn.cursor()
    try:
        cur.execute(sql, {'mp': str(value)})
        result = cur.fetchall()
    finally:
        # Bug fix: the original leaked the cursor when execute/fetchall
        # raised; always close it.
        cur.close()
    segdict = collections.defaultdict(tuple)
    for genome, start, end in result:
        segdict[genome] = (start, end)
    return segdict
def write_recording(recording, save_path):
    '''Save recording extractor to MEArec format.

    Parameters
    ----------
    recording: RecordingExtractor
        Recording extractor object to be saved
    save_path: str
        .h5 or .hdf5 path
    '''
    assert HAVE_MREX, "To use the MEArec extractors, install MEArec: \n\n pip install MEArec\n\n"
    out_path = Path(save_path)
    if out_path.is_dir():
        print("The file will be saved as recording.h5 in the provided folder")
        out_path = out_path / 'recording.h5'
    # Guard clause: anything but .h5/.hdf5 is rejected up front.
    if out_path.suffix not in ('.h5', '.hdf5'):
        raise Exception("Provide a folder or an .h5/.hdf5 as 'save_path'")
    info = {'recordings': {'fs': recording.get_sampling_frequency()}}
    rec_dict = {'recordings': recording.get_traces()}
    if 'location' in recording.get_channel_property_names():
        positions = np.array([recording.get_channel_property(chan, 'location')
                              for chan in recording.get_channel_ids()])
        rec_dict['channel_positions'] = positions
    recgen = mr.RecordingGenerator(rec_dict=rec_dict, info=info)
    mr.save_recording_generator(recgen, str(out_path), verbose=False)
def _safe_issue_checkout ( repo , issue = None ) :
"""Safely checkout branch for the issue .""" | branch_name = str ( issue ) if issue else 'master'
if branch_name not in repo . heads :
branch = repo . create_head ( branch_name )
else :
branch = repo . heads [ branch_name ]
branch . checkout ( ) |
def _write_conf_file():
    """Write configuration file as it is defined in settings."""
    # Overwrites any existing file at CONF_FILE with the default template.
    with open(CONF_FILE, "w") as conf:
        conf.write(DEFAULT_PROFTPD_CONF)
    logger.debug("'%s' created.", CONF_FILE)
def _get_obs_array(self, k, use_raw=False, layer='X'):
    """Get an array from the layer (default layer='X') along the observation
    dimension by first looking up obs.keys and then var.index.

    :param k: key to resolve, first against ``obs`` columns, then variable names.
    :param use_raw: resolve against ``.raw`` instead of the processed data.
    :param layer: layer to pull values from; ``'X'`` means the main data matrix.
    :raises ValueError: when the key or layer cannot be resolved.
    """
    # Whether k names a variable in the raw (unfiltered) annotation.
    in_raw_var_names = k in self.raw.var_names if self.raw is not None else False
    if use_raw and self.raw is None:
        raise ValueError('.raw doesn\'t exist')
    if k in self.obs.keys():
        # Observation annotation column takes precedence over var names.
        x = self._obs[k]
    elif in_raw_var_names and use_raw and layer == 'X':
        # Column of the raw data matrix.
        x = self.raw[:, k].X
    elif k in self.var_names and not use_raw and (layer == 'X' or layer in self.layers.keys()):
        # Column of the main matrix or of a named layer.
        x = self[:, k].X if layer == 'X' else self[:, k].layers[layer]
    elif use_raw and layer != 'X':
        raise ValueError('No layers in .raw')
    elif layer != 'X' and layer not in self.layers.keys():
        raise ValueError('Did not find {} in layers.keys.'.format(layer))
    else:
        raise ValueError('Did not find {} in obs.keys or var_names.'.format(k))
    return x
def _repr_pretty_(self, p, cycle):
    """Derived from IPython's dict and sequence pretty printer functions,
    https://github.com/ipython/ipython/blob/master/IPython/lib/pretty.py

    :param p: IPython pretty printer instance.
    :param cycle: True when the printer detected a reference cycle.
    """
    if cycle:
        # Cycle guard: print an elided placeholder instead of recursing.
        p.text('{...}')
    else:
        keys = self.keys()
        if keys:
            delim_start = '[{'
            delim_end = '}]'
            # Pad keys to a common width, capped at _repr_max_width.
            wid_max_max = self._repr_max_width
            wid_max = max([len(k) for k in keys])
            wid_max = min([wid_max, wid_max_max])
            key_template = '{{:{:d}s}}: '.format(wid_max)
            with p.group(len(delim_start), delim_start, delim_end):  # Loop over item keys
                for idx, key in p._enumerate(keys):
                    if idx:
                        # Separator between items; breakable allows wrapping.
                        p.text(',')
                    p.breakable()
                    p.text(key_template.format(key))
                    p.pretty(self[key])
        else:
            # Empty container representation.
            p.text('{}')
def make(parser):
    """Prepare a data disk on remote host.

    Populates `parser` with the `osd list` and `osd create` subcommands
    and their arguments; dispatches to `osd` via set_defaults.
    """
    sub_command_help = dedent("""
    Create OSDs from a data disk on a remote host:
        ceph-deploy osd create {node} --data /path/to/device
    For bluestore, optional devices can be used::
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device
        ceph-deploy osd create {node} --data /path/to/data --block-wal /path/to/wal-device
        ceph-deploy osd create {node} --data /path/to/data --block-db /path/to/db-device --block-wal /path/to/wal-device
    For filestore, the journal must be specified, as well as the objectstore::
        ceph-deploy osd create {node} --filestore --data /path/to/data --journal /path/to/journal
    For data devices, it can be an existing logical volume in the format of:
    vg/lv, or a device. For other OSD components like wal, db, and journal, it
    can be logical volume (in vg/lv format) or it must be a GPT partition.
    """)
    # Show the help text verbatim, preserving the example indentation.
    parser.formatter_class = argparse.RawDescriptionHelpFormatter
    parser.description = sub_command_help
    # Subcommand dispatcher: a subcommand is mandatory.
    osd_parser = parser.add_subparsers(dest='subcommand')
    osd_parser.required = True
    # `osd list`: report OSD info from one or more remote hosts.
    osd_list = osd_parser.add_parser('list', help='List OSD info from remote host(s)')
    osd_list.add_argument('host', nargs='+', metavar='HOST', help='remote host(s) to list OSDs from')
    osd_list.add_argument('--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls',)
    # `osd create`: prepare and activate a device as a new OSD.
    osd_create = osd_parser.add_parser('create', help='Create new Ceph OSD daemon by preparing and activating a device')
    osd_create.add_argument('--data', metavar='DATA', help='The OSD data logical volume (vg/lv) or absolute path to device')
    osd_create.add_argument('--journal', help='Logical Volume (vg/lv) or path to GPT partition',)
    # --zap-disk is retained only to surface the deprecation message.
    osd_create.add_argument('--zap-disk', action='store_true', help='DEPRECATED - cannot zap when creating an OSD')
    osd_create.add_argument('--fs-type', metavar='FS_TYPE', choices=['xfs', 'btrfs'], default='xfs', help='filesystem to use to format DEVICE (xfs, btrfs)',)
    osd_create.add_argument('--dmcrypt', action='store_true', help='use dm-crypt on DEVICE',)
    osd_create.add_argument('--dmcrypt-key-dir', metavar='KEYDIR', default='/etc/ceph/dmcrypt-keys', help='directory where dm-crypt keys are stored',)
    # Objectstore flags default to None so the callee can tell "unset" apart.
    osd_create.add_argument('--filestore', action='store_true', default=None, help='filestore objectstore',)
    osd_create.add_argument('--bluestore', action='store_true', default=None, help='bluestore objectstore',)
    osd_create.add_argument('--block-db', default=None, help='bluestore block.db path')
    osd_create.add_argument('--block-wal', default=None, help='bluestore block.wal path')
    osd_create.add_argument('host', nargs='?', metavar='HOST', help='Remote host to connect')
    osd_create.add_argument('--debug', action='store_true', help='Enable debug mode on remote ceph-volume calls',)
    parser.set_defaults(func=osd,)
def getextensibleindex(bunchdt, data, commdct, key, objname):
    """get the index of the first extensible item

    :param bunchdt: parsed IDF data bunches.
    :param data: IDD data structure.
    :param commdct: IDD comment/metadata structure.
    :param key: object type key.
    :param objname: name of the object to look up.
    :return: None when the object does not exist; the object itself when it
        has no 'begin-extensible' field (historical fallback); otherwise the
        index of the first extensible field.
    """
    theobject = getobject(bunchdt, key, objname)
    if theobject is None:
        return None
    theidd = iddofobject(data, commdct, key)
    extensible_i = [i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]]
    try:
        extensible_i = extensible_i[0]
    except IndexError:
        # No extensible fields: preserve the historical fallback of
        # returning the object itself.
        return theobject
    # Bug fix: the original fell through here and implicitly returned None,
    # never yielding the index its docstring promises.
    return extensible_i
def _check_disabled ( self ) :
"""Check if health check is disabled .
It logs a message if health check is disabled and it also adds an item
to the action queue based on ' on _ disabled ' setting .
Returns :
True if check is disabled otherwise False .""" | if self . config [ 'check_disabled' ] :
if self . config [ 'on_disabled' ] == 'withdraw' :
self . log . info ( "Check is disabled and ip_prefix will be " "withdrawn" )
self . log . info ( "adding %s in the queue" , self . ip_with_prefixlen )
self . action . put ( self . del_operation )
self . log . info ( "Check is now permanently disabled" )
elif self . config [ 'on_disabled' ] == 'advertise' :
self . log . info ( "check is disabled, ip_prefix wont be withdrawn" )
self . log . info ( "adding %s in the queue" , self . ip_with_prefixlen )
self . action . put ( self . add_operation )
self . log . info ( 'check is now permanently disabled' )
return True
return False |
def protected_operation(fn):
    """Use this decorator to prevent an operation from being executed
    when the related uri resource is still in use.

    The parent_object must contain:
    * a request
    * with a registry.queryUtility(IReferencer)

    :raises pyramid.httpexceptions.HTTPConflict: Signals that we don't want to
        delete a certain URI because it's still in use somewhere else.
    :raises pyramid.httpexceptions.HTTPInternalServerError: Raised when we were
        unable to check that the URI is no longer being used.
    """
    @functools.wraps(fn)
    def wrapper(parent_object, *args, **kw):
        # _advice returns a response when the resource is still referenced;
        # otherwise fall through to the wrapped operation.
        blocked = _advice(parent_object.request)
        if blocked is not None:
            return blocked
        return fn(parent_object, *args, **kw)
    return wrapper
def phoncontent(self, cls='current', correctionhandling=CorrectionHandling.CURRENT):
    """Get the phonetic content explicitly associated with this element (of the specified class).

    Unlike :meth:`phon`, this method does not recurse into child elements (with the sole exception of the Correction/New element), and it returns the PhonContent instance rather than the actual text!

    Parameters:
        cls (str): The class of the phonetic content to obtain, defaults to ``current``.
        correctionhandling: Specifies what content to retrieve when corrections are encountered. The default is ``CorrectionHandling.CURRENT``, which will retrieve the corrected/current content. You can set this to ``CorrectionHandling.ORIGINAL`` if you want the content prior to correction, and ``CorrectionHandling.EITHER`` if you don't care.

    Returns:
        The phonetic content (:class:`PhonContent`)

    Raises:
        :class:`NoSuchPhon` if there is no phonetic content for the element

    See also:
        :meth:`phon`
        :meth:`textcontent`
        :meth:`text`
    """
    if not self.SPEAKABLE:  # only printable elements can hold text
        raise NoSuchPhon
    # Find explicit text content (same class); first match wins.
    for e in self:
        if isinstance(e, PhonContent):
            # cls=None acts as a wildcard matching any content class.
            if cls is None or e.cls == cls:
                return e
        elif isinstance(e, Correction):
            # Delegate into corrections; a miss there is not fatal, keep
            # scanning the remaining siblings.
            try:
                return e.phoncontent(cls, correctionhandling)
            except NoSuchPhon:
                pass
    raise NoSuchPhon
def verify_create_instance(self, **kwargs):
    """Verifies an instance creation command without actually placing an order.

    See :func:`create_instance` for a list of available options.

    Example::

        new_vsi = {
            'domain': u'test01.labs.sftlyr.ws',
            'hostname': u'minion05',
            'datacenter': u'hkg02',
            'flavor': 'BL1_1X2X100',
            'dedicated': False,
            'private': False,
            'os_code': u'UBUNTU_LATEST',
            'hourly': True,
            'ssh_keys': [1234],
            'disks': ('100', '25'),
            'local_disk': True,
            'tags': 'test, pleaseCancel',
            'public_security_groups': [12, 15],
        }
        vsi = mgr.verify_create_instance(**new_vsi)
        # vsi will be a SoftLayer_Container_Product_Order_Virtual_Guest
        # if your order is correct. Otherwise you will get an exception
        print vsi
    """
    # Tags are applied after provisioning, so they play no part in order
    # verification; drop them before building the order template.
    kwargs.pop('tags', None)
    return self.guest.generateOrderTemplate(self._generate_create_dict(**kwargs))
def _set_limit(self, v, load=False):
    """Setter method for limit, mapped from YANG variable /hardware/profile/tcam/limit (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_limit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_limit() directly.
    """
    # pyangbind-generated setter: coerce the incoming value through its
    # declared union type, if any, before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # YANGDynClass validates v against the generated container schema.
        t = YANGDynClass(v, base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the schema description so callers see what was expected.
        raise ValueError({'error-string': """limit must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=limit.limit, is_container='container', presence=False, yang_name="limit", rest_name="limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set upper limit to applications on TCAM entry usage', u'display-when': u"../predefined/tcam_profiletype = 'npb-optimised-1'", u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""", })
    self.__limit = t
    # Notify change listeners, when the generated class provides _set().
    if hasattr(self, '_set'):
        self._set()
def distance_to_contact(D, alpha=1):
    """Compute contact matrix from input distance matrix. Distance values of
    zeroes are given the largest contact count otherwise inferred non-zero
    distance values.
    """
    if callable(alpha):
        distance_function = alpha
    else:
        try:
            exponent = np.float64(alpha)

            def distance_function(x):
                return 1 / (x ** (1 / exponent))
        except TypeError:
            print("Alpha parameter must be callable or an array-like")
            raise
        except ZeroDivisionError:
            raise ValueError("Alpha parameter must be non-zero")
    nonzero = D != 0
    ceiling = np.max(distance_function(D[nonzero]))
    M = np.zeros(D.shape)
    M[nonzero] = distance_function(D[nonzero])
    # Zero distances get the largest contact value inferred elsewhere.
    M[D == 0] = ceiling
    return M
def load(cls, path):
    """Create a new MLPipeline from a JSON specification.

    The JSON file format is the same as the one created by the `to_dict` method.

    Args:
        path (str): Path of the JSON file to load.

    Returns:
        MLPipeline:
            A new MLPipeline instance with the specification found
            in the JSON file.
    """
    with open(path, 'r') as in_file:
        spec = json.load(in_file)
    return cls.from_dict(spec)
def get_results(self, stream, time_interval):
    """Get the results for a given stream

    :param time_interval: The time interval
    :param stream: The stream object
    :return: A generator over stream instances
    """
    # Raw mongo query: documents of this stream inside (start, end].
    query = stream.stream_id.as_raw()
    query['datetime'] = {'$gt': time_interval.start, '$lte': time_interval.end}
    with switch_db(StreamInstanceModel, 'hyperstream'):
        for document in StreamInstanceModel.objects(__raw__=query):
            yield StreamInstance(timestamp=document.datetime, value=document.value)
def plot_fit(self, **kwargs):
    """Plots the fit of the model against the data

    Keyword Args:
        figsize (tuple): figure size in inches, defaults to (10, 7).
    """
    import matplotlib.pyplot as plt
    import seaborn as sns
    figsize = kwargs.get('figsize', (10, 7))
    plt.figure(figsize=figsize)
    # Skip the first `ar` observations: they have no filtered value.
    date_index = self.index[self.ar:self.data.shape[0]]
    # _model returns the filtered series and the (possibly transformed) data.
    mu, Y = self._model(self.latent_variables.get_z_values())
    plt.plot(date_index, Y, label='Data')
    plt.plot(date_index, mu, label='Filter', c='black')
    plt.title(self.data_name)
    plt.legend(loc=2)
    plt.show()
def plotAccuracy(suite, name):
    """Plots classification accuracy

    Aggregates per-experiment classification results by cells-per-axis,
    plots accuracy (left axis) against mean/max sensations needed
    (right axis), and saves the figure as <path>/<name>.pdf.
    """
    path = suite.cfgparser.get(name, "path")
    path = os.path.join(path, name)
    accuracy = defaultdict(list)
    sensations = defaultdict(list)
    for exp in suite.get_exps(path=path):
        params = suite.get_params(exp)
        maxTouches = params["num_sensations"]
        cells = params["cells_per_axis"]
        res = suite.get_history(exp, 0, "Correct classification")
        # An object counts as classified if any sensation step got it right.
        classified = [any(x) for x in res]
        # NOTE: overwrites the defaultdict list with a scalar — one
        # experiment per cells-per-axis value is assumed.
        accuracy[cells] = float(sum(classified)) / float(len(classified))
        # First correct sensation index; 0 (never correct) maps to maxTouches.
        touches = [np.argmax(x) or maxTouches for x in res]
        sensations[cells] = [np.mean(touches), np.max(touches)]
    plt.title("Classification Accuracy")
    accuracy = OrderedDict(sorted(accuracy.items(), key=lambda t: t[0]))
    fig, ax1 = plt.subplots()
    ax1.plot(accuracy.keys(), accuracy.values(), "b")
    ax1.set_xlabel("Cells per axis")
    ax1.set_ylabel("Accuracy", color="b")
    ax1.tick_params("y", colors="b")
    sensations = OrderedDict(sorted(sensations.items(), key=lambda t: t[0]))
    # Second y-axis for the sensation counts (mean solid, max dashed).
    ax2 = ax1.twinx()
    ax2.set_prop_cycle(linestyle=["-", "--"])
    ax2.plot(sensations.keys(), sensations.values(), "r")
    ax2.set_ylabel("Sensations", color="r")
    ax2.tick_params("y", colors="r")
    ax2.legend(("Mean", "Max"))
    # save
    path = suite.cfgparser.get(name, "path")
    plotPath = os.path.join(path, "{}.pdf".format(name))
    plt.savefig(plotPath)
    plt.close()
def exclusive(via=threading.Lock):
    """Mark a callable as exclusive

    :param via: factory for a Lock to guard the callable

    Guards the callable against being entered again before completion.
    Explicitly raises a :py:exc:`RuntimeError` on violation.

    :note: If applied to a method, it is exclusive across all instances.
    """
    def make_exclusive(fnc):
        fnc_guard = via()

        @functools.wraps(fnc)
        def exclusive_call(*args, **kwargs):
            # Non-blocking acquire: fail fast instead of deadlocking on
            # re-entry from the same thread.
            if fnc_guard.acquire(blocking=False):
                try:
                    return fnc(*args, **kwargs)
                finally:
                    fnc_guard.release()
            else:
                # Bug fix: the original raised the literal string
                # 'exclusive call to %s violated' without interpolating
                # the callable into the %s placeholder.
                raise RuntimeError('exclusive call to %s violated' % fnc)
        return exclusive_call
    return make_exclusive
def _SetSshHostKeys(self, host_key_types=None):
    """Regenerates SSH host keys when the VM is restarted with a new IP address.

    Booting a VM from an image with a known SSH key allows a number of attacks.
    This function will regenerating the host key whenever the IP address
    changes. This applies the first time the instance is booted, and each time
    the disk is used to boot a new instance.

    Args:
      host_key_types: string, a comma separated list of host key types.
    """
    section = 'Instance'
    instance_id = self._GetInstanceId()
    # Only regenerate when the stored instance id differs from the current
    # one, i.e. the disk is booting a new instance.
    if instance_id != self.instance_config.GetOptionString(section, 'instance_id'):
        self.logger.info('Generating SSH host keys for instance %s.', instance_id)
        file_regex = re.compile(r'ssh_host_(?P<type>[a-z0-9]*)_key\Z')
        key_dir = '/etc/ssh'
        # Existing host key files on disk.
        key_files = [f for f in os.listdir(key_dir) if file_regex.match(f)]
        # Key types explicitly requested by the caller.
        key_types = host_key_types.split(',') if host_key_types else []
        key_types_files = ['ssh_host_%s_key' % key_type for key_type in key_types]
        # Union: regenerate every existing key and create any requested ones.
        for key_file in set(key_files) | set(key_types_files):
            key_type = file_regex.match(key_file).group('type')
            key_dest = os.path.join(key_dir, key_file)
            self._GenerateSshKey(key_type, key_dest)
        self._StartSshd()
        # Persist the id so the next boot of the same instance is a no-op.
        self.instance_config.SetOption(section, 'instance_id', str(instance_id))
def ugettext(self, text):
    """Translates message/text and returns it in a unicode string.

    Uses the runtime's "i18n" service to perform the translation.
    """
    i18n_service = self.runtime.service(self, "i18n")
    return i18n_service.ugettext(text)
def _extract_jar(self, coordinate, jar_path):
    """Extracts the jar to a subfolder of workdir/extracted and returns the path to it."""
    # Content-address the output directory by the jar's SHA1 so identical
    # jars share a single extraction.
    with open(jar_path, 'rb') as jar_file:
        digest = sha1(jar_file.read()).hexdigest()
    outdir = os.path.join(self.workdir, 'extracted', digest)
    if os.path.exists(outdir):
        self.context.log.debug('Jar {jar} already extracted at {jar_path}.'.format(jar=coordinate, jar_path=jar_path))
    else:
        ZIP.extract(jar_path, outdir)
        self.context.log.debug('Extracting jar {jar} at {jar_path}.'.format(jar=coordinate, jar_path=jar_path))
    return outdir
def requires_product_environment(func, *args, **kws):
    """task decorator that makes sure that the product environment
    of django_productline is activated:
    - context is bound
    - features have been composed
    """
    # Imported lazily so merely importing this module does not trigger
    # product selection.
    from django_productline import startup
    startup.select_product()
    return func(*args, **kws)
def poll_for_response(self):
    """Polls the device for user input

    If there is a keymapping for the device, the key map is applied
    to the key reported from the device.

    If a response is waiting to be processed, the response is appended
    to the internal response_queue
    """
    # Nothing pressed: nothing to do.
    if self.con.check_for_keypress() == NO_KEY_DETECTED:
        return
    response = self.con.get_current_response()
    if self.keymap is None:
        # No device keymap: fall back to zero-based key numbering.
        response['key'] -= 1
    else:
        response['key'] = self.keymap[response['key']]
    self.response_queue.append(response)
def _apply ( self , plan ) :
'''Required function of manager . py to actually apply a record change .
: param plan : Contains the zones and changes to be made
: type plan : octodns . provider . base . Plan
: type return : void''' | desired = plan . desired
changes = plan . changes
self . log . debug ( '_apply: zone=%s, len(changes)=%d' , desired . name , len ( changes ) )
azure_zone_name = desired . name [ : len ( desired . name ) - 1 ]
self . _check_zone ( azure_zone_name , create = True )
for change in changes :
class_name = change . __class__ . __name__
getattr ( self , '_apply_{}' . format ( class_name ) ) ( change ) |
def parent_folder(path, base=None):
    """:param str|None path: Path to file or folder
    :param str|None base: Base folder to use for relative paths (default: current working dir)
    :return str: Absolute path of parent folder of 'path'
    """
    # Falsy path (None or '') is returned unchanged, matching the original
    # short-circuiting `and` expression.
    if not path:
        return path
    return os.path.dirname(resolved_path(path, base=base))
def _handle_func_def ( self , node , scope , ctxt , stream ) :
"""Handle FuncDef nodes
: node : TODO
: scope : TODO
: ctxt : TODO
: stream : TODO
: returns : TODO""" | self . _dlog ( "handling function definition" )
func = self . _handle_node ( node . decl , scope , ctxt , stream )
func . body = node . body |
def remove_profile(self, profile):
    """Remove a profile.

    :param profile: The profile to be removed.
    :type profile: basestring, str
    """
    target = os.path.join(self.root_directory, 'minimum_needs', profile + '.json')
    self.remove_file(target)
def _check_max_running(self, func, data, opts, now):
    '''Return the schedule data structure

    Counts currently running jobs with this schedule name and, when the
    count reaches data['maxrunning'], marks the job as skipped by setting
    data['run'] = False and recording the skip reason/time.
    '''
    # Check to see if there are other jobs with this
    # signature running.  If there are more than maxrunning
    # jobs present then don't start another.
    # If jid_include is False for this job we can ignore all this
    # NOTE--jid_include defaults to True, thus if it is missing from the data
    # dict we treat it like it was there and is True
    # Check if we're able to run
    if not data['run']:
        return data
    if 'jid_include' not in data or data['jid_include']:
        jobcount = 0
        # Running-job source differs between master and minion roles.
        if self.opts['__role'] == 'master':
            current_jobs = salt.utils.master.get_running_jobs(self.opts)
        else:
            current_jobs = salt.utils.minion.running(self.opts)
        for job in current_jobs:
            if 'schedule' in job:
                log.debug('schedule.handle_func: Checking job against fun %s: %s', func, job)
                # Same schedule name and the owning process is still alive.
                if data['name'] == job['schedule'] and salt.utils.process.os_is_running(job['pid']):
                    jobcount += 1
                    log.debug('schedule.handle_func: Incrementing jobcount, now %s, maxrunning is %s', jobcount, data['maxrunning'])
                    if jobcount >= data['maxrunning']:
                        log.debug('schedule.handle_func: The scheduled job %s was not started, %s already running', data['name'], data['maxrunning'])
                        data['_skip_reason'] = 'maxrunning'
                        data['_skipped'] = True
                        data['_skipped_time'] = now
                        data['run'] = False
                        return data
    return data
def setMaxSpeed(self, laneID, speed):
    """setMaxSpeed(string, double) -> None

    Sets a new maximum allowed speed on the lane in m/s.
    """
    # Single TraCI command: set the VAR_MAXSPEED lane variable.
    self._connection._sendDoubleCmd(tc.CMD_SET_LANE_VARIABLE, tc.VAR_MAXSPEED, laneID, speed)
def sign(self, consumer_secret, access_token_secret, method, url, oauth_params, req_kwargs):
    '''Sign request using PLAINTEXT method.

    :param consumer_secret: Consumer secret.
    :type consumer_secret: str
    :param access_token_secret: Access token secret (optional).
    :type access_token_secret: str
    :param method: Unused
    :type method: str
    :param url: Unused
    :type url: str
    :param oauth_params: Unused
    :type oauth_params: dict
    :param req_kwargs: Unused
    :type req_kwargs: dict
    '''
    # PLAINTEXT signature: "<consumer_secret>&<token_secret>", both escaped.
    key = self._escape(consumer_secret) + b'&'
    if access_token_secret:
        key = key + self._escape(access_token_secret)
    return key.decode()
def _instantiate ( cls , params ) :
"""Helper to instantiate Attention classes from parameters . Warns in log if parameter is not supported
by class constructor .
: param cls : Attention class .
: param params : configuration parameters .
: return : instance of ` cls ` type .""" | sig_params = inspect . signature ( cls . __init__ ) . parameters
valid_params = dict ( )
for key , value in params . items ( ) :
if key in sig_params :
valid_params [ key ] = value
else :
logger . debug ( 'Type %s does not support parameter \'%s\'' % ( cls . __name__ , key ) )
return cls ( ** valid_params ) |
def get_embedded_items(result_collection):
    """Given a result_collection (returned by a previous API call that
    returns a collection, like get_bundle_list() or search()), return a
    list of embedded items with each item in the returned list
    considered a result object.

    'result_collection' a JSON object returned by a previous API
    call. The parameter 'embed_items' must have been True when the
    result_collection was originally requested. May not be None.

    Returns a list, which may be empty if no embedded items were found.
    """
    assert result_collection is not None
    embedded = result_collection.get('_embedded')
    # Gracefully handle being passed a non-collection: no '_embedded' key.
    if embedded is None:
        return []
    return embedded.get('items', [])
def formatmonthname(self, theyear, themonth, withyear=True):
    """Return a month name translated as a table row."""
    # withyear is accepted for interface compatibility; the year is
    # always rendered.
    caption = '{} {}'.format(MONTHS[themonth].title(), theyear)
    return '<caption>{}</caption>'.format(caption)
def _get_all_eip_addresses(addresses=None, allocation_ids=None, region=None, key=None, keyid=None, profile=None):
    '''Get all EIP's associated with the current credentials.

    addresses
        (list) - Optional list of addresses.  If provided, only those those in the
        list will be returned.
    allocation_ids
        (list) - Optional list of allocation IDs.  If provided, only the
        addresses associated with the given allocation IDs will be returned.

    returns
        (list) - The requested Addresses as a list of :class:`boto.ec2.address.Address`
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        return conn.get_all_addresses(addresses=addresses, allocation_ids=allocation_ids)
    except boto.exception.BotoServerError as exc:
        # Degrade to an empty result on API errors, but keep a trace.
        log.error(exc)
        return []
def to_list(self, n=None):
    """Converts sequence to list of elements.

    >>> type(seq([]).to_list())
    list

    >>> type(seq([]))
    functional.pipeline.Sequence

    >>> seq([1, 2, 3]).to_list()
    [1, 2, 3]

    :param n: Take n elements of sequence if not None
    :return: list of elements in sequence
    """
    if n is not None:
        return self.cache().take(n).list()
    # Full conversion: materialize, then hand back the cached base list.
    self.cache()
    return self._base_sequence
def begin(self):
    """Called once before using the session to check global step."""
    # tf.train.get_global_step returns None when no global step variable
    # exists in the default graph.
    self._global_step_tensor = tf.train.get_global_step()
    if self._global_step_tensor is None:
        raise RuntimeError('Global step should be created to use StepCounterHook.')
def _patch_for_tf1_12(tf):
    """Monkey patch tf 1.12 so tfds can use it."""
    # Alias the tf.io.gfile API onto the legacy tf.gfile implementations.
    tf.io.gfile = tf.gfile
    tf.io.gfile.copy = tf.gfile.Copy
    tf.io.gfile.exists = tf.gfile.Exists
    tf.io.gfile.glob = tf.gfile.Glob
    tf.io.gfile.isdir = tf.gfile.IsDirectory
    tf.io.gfile.listdir = tf.gfile.ListDirectory
    tf.io.gfile.makedirs = tf.gfile.MakeDirs
    tf.io.gfile.mkdir = tf.gfile.MkDir
    tf.io.gfile.remove = tf.gfile.Remove
    tf.io.gfile.rename = tf.gfile.Rename
    tf.io.gfile.rmtree = tf.gfile.DeleteRecursively
    tf.io.gfile.stat = tf.gfile.Stat
    tf.io.gfile.walk = tf.gfile.Walk
    tf.io.gfile.GFile = tf.gfile.GFile
    # tf.data.experimental lived in tf.contrib.data in 1.12.
    tf.data.experimental = tf.contrib.data
    # Build a synthetic tf.compat.v1 namespace backed by the 1.x symbols.
    tf.compat.v1 = types.ModuleType("tf.compat.v1")
    tf.compat.v1.assert_greater = tf.assert_greater
    tf.compat.v1.placeholder = tf.placeholder
    tf.compat.v1.ConfigProto = tf.ConfigProto
    tf.compat.v1.Session = tf.Session
    tf.compat.v1.enable_eager_execution = tf.enable_eager_execution
    tf.compat.v1.io = tf.io
    tf.compat.v1.data = tf.data
    tf.compat.v1.data.Dataset = tf.data.Dataset
    tf.compat.v1.data.make_one_shot_iterator = (lambda ds: ds.make_one_shot_iterator())
    tf.compat.v1.train = tf.train
    tf.compat.v1.global_variables_initializer = tf.global_variables_initializer
    tf.compat.v1.test = tf.test
    tf.compat.v1.test.get_temp_dir = tf.test.get_temp_dir
    # tf.nest lived under tf.contrib.framework in 1.12.
    tf.nest = tf.contrib.framework.nest
def simxGetCollectionHandle(clientID, collectionName, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    handle = ct.c_int()
    name = collectionName
    # The C API expects bytes; encode Python 3 str arguments.
    if (sys.version_info[0] == 3) and (type(name) is str):
        name = name.encode('utf-8')
    return_code = c_GetCollectionHandle(clientID, name, ct.byref(handle), operationMode)
    return return_code, handle.value
def OpenDialog(self, Name, *Params):
    """Open dialog. Use this method to open dialogs added in newer Skype versions if there is no
    dedicated method in Skype4Py.

    :Parameters:
      Name : str
        Dialog name.
      Params : unicode
        One or more optional parameters.
    """
    self._Skype._Api.allow_focus(self._Skype.Timeout)
    # Drop empty/falsy parameters before building the command string.
    params = [p for p in (str(Name),) + Params if p]
    self._Skype._DoCommand('OPEN %s' % tounicode(' '.join(params)))
def find_matching_middleware(self, method, path):
    """Iterator handling the matching of middleware against a method + path
    pair. Yields (middleware, matched path, remaining path) triples.
    """
    for mw in self.mw_list:
        if not mw.matches_method(method):
            continue
        # Split the url into the part matched by this middleware and the
        # 'rest' (the part AFTER the match) that a subchain may match later.
        matched, remainder = mw.path_split(path)
        if self.should_skip_middleware(mw, matched, remainder):
            continue
        yield mw, matched, remainder
def _adaptSynapses(self, inputVector, activeColumns, predictedActiveCells):
    """This is the primary learning method. It updates synapses' permanence based
    on the bottom-up input to the TP and the TP's active cells.

    For each active cell, its synapses' permanences are updated as follows:
    1. if pre-synaptic input is ON due to a correctly predicted cell,
       increase permanence by _synPredictedInc
    2. else if input is ON due to an active cell, increase permanence by
       _synPermActiveInc
    3. else input is OFF, decrease permanence by _synPermInactiveDec

    NOTE: this block is Python 2 source (print statements below).

    Parameters:
    ----------------------------
    inputVector:    a numpy array whose ON bits represent the active cells from
                    temporal memory
    activeColumns:  an array containing the indices of the columns that
                    survived the inhibition step
    predictedActiveCells: a numpy array with numInputs elements. A 1 indicates
                    that this cell switched from predicted state in
                    the previous time step to active state in the current
                    timestep
    """
    inputIndices = numpy.where(inputVector > 0)[0]
    predictedIndices = numpy.where(predictedActiveCells > 0)[0]
    permChanges = numpy.zeros(self._numInputs)
    # Decrement inactive TM cell -> active TP cell connections
    permChanges.fill(-1 * self._synPermInactiveDec)
    # Increment active TM cell -> active TP cell connections
    permChanges[inputIndices] = self._synPermActiveInc
    # Increment correctly predicted TM cell -> active TP cell connections
    # (overrides the plain-active increment for those indices).
    permChanges[predictedIndices] = self._synPredictedInc
    if self._spVerbosity > 4:
        print "\n============== _adaptSynapses ======"
        print "Active input indices:", inputIndices
        print "predicted input indices:", predictedIndices
        print "\n============== _adaptSynapses ======\n"
    for i in activeColumns:
        # Get the permanences of the synapses of TP cell i
        perm = self._permanences.getRow(i)
        # Only consider connections in column's potential pool (receptive field)
        maskPotential = numpy.where(self._potentialPools.getRow(i) > 0)[0]
        perm[maskPotential] += permChanges[maskPotential]
        self._updatePermanencesForColumn(perm, i, raisePerm=False)
def create_adv_by_name(model, x, attack_type, sess, dataset, y=None, **kwargs):
    """Creates the symbolic graph of an adversarial example given the name of
    an attack. Simplifies creating the symbolic graph of an attack by defining
    dataset-specific parameters.

    Dataset-specific default parameters are used unless a different value is
    given in kwargs.

    :param model: an object of Model class
    :param x: Symbolic input to the attack.
    :param attack_type: A string that is the name of an attack.
    :param sess: Tensorflow session.
    :param dataset: The name of the dataset as a string to use for default
        params.
    :param y: (optional) a symbolic variable for the labels.
    :param kwargs: (optional) additional parameters to be passed to the attack.
    :raises ValueError: if ``attack_type`` is not a recognized attack name.
    """
    # TODO: black box attacks
    attack_names = {'FGSM': FastGradientMethod,
                    'MadryEtAl': MadryEtAl,
                    'MadryEtAl_y': MadryEtAl,
                    'MadryEtAl_multigpu': MadryEtAlMultiGPU,
                    'MadryEtAl_y_multigpu': MadryEtAlMultiGPU}
    if attack_type not in attack_names:
        # ValueError is a subclass of Exception, so callers that caught the
        # old generic Exception still work.
        raise ValueError('Attack %s not defined.' % attack_type)
    attack_params_shared = {
        'mnist': {'eps': .3, 'eps_iter': 0.01, 'clip_min': 0., 'clip_max': 1.,
                  'nb_iter': 40},
        'cifar10': {'eps': 8. / 255, 'eps_iter': 0.01, 'clip_min': 0.,
                    'clip_max': 1., 'nb_iter': 20}
    }
    with tf.variable_scope(attack_type):
        attack_class = attack_names[attack_type]
        attack = attack_class(model, sess=sess)
        # Extract feedable and structural keyword arguments from kwargs.
        # Bug fix: dict.keys() is a view in Python 3 and cannot be
        # concatenated with a list; wrap it in list() first.
        fd_kwargs = list(attack.feedable_kwargs.keys()) + attack.structural_kwargs
        params = attack_params_shared[dataset].copy()
        # Caller-supplied values override the dataset defaults; None means
        # "use the default".
        params.update({k: v for k, v in kwargs.items() if v is not None})
        params = {k: v for k, v in params.items() if k in fd_kwargs}
        if '_y' in attack_type:
            params['y'] = y
        logging.info(params)
        adv_x = attack.generate(x, **params)
    return adv_x
def solution_path(self, min_lambda, max_lambda, lambda_bins, verbose=0):
    '''Follows the solution path to find the best lambda value.

    Solves the problem over a log-spaced grid of ``lambda_bins`` penalty
    values from ``max_lambda`` down to ``min_lambda`` (each solve warm-starts
    from the previous solution) and scores every fit with AIC, AICc and BIC.
    The best model is the one with the lowest BIC.

    :param min_lambda: smallest penalty value on the grid
    :param max_lambda: largest penalty value on the grid
    :param lambda_bins: number of grid points
    :param verbose: print progress information when truthy
    :return: dict with score traces ('aic', 'aicc', 'bic'), degrees of
        freedom ('dof'), 'loglikelihood', the stacked 'beta' trace, the
        'lambda' grid, 'best_idx', the 'best' coefficients and their
        'plateaus'
    '''
    lambda_grid = np.exp(np.linspace(np.log(max_lambda), np.log(min_lambda), lambda_bins))
    aic_trace = np.zeros(lambda_grid.shape)  # The AIC score for each lambda value
    aicc_trace = np.zeros(lambda_grid.shape)  # The AICc score (corrects AIC for finite sample size)
    bic_trace = np.zeros(lambda_grid.shape)  # The BIC score for each lambda value
    dof_trace = np.zeros(lambda_grid.shape)  # The degrees of freedom of each final solution
    log_likelihood_trace = np.zeros(lambda_grid.shape)
    beta_trace = []
    best_idx = None
    best_plateaus = None
    # Solve the series of lambda values with warm starts at each point
    for i, lam in enumerate(lambda_grid):
        if verbose:
            print('#{0} Lambda = {1}'.format(i, lam))
        # Fit to the final values
        beta = self.solve(lam)
        if verbose:
            print('Calculating degrees of freedom')
        # Count the number of free parameters in the grid (dof): each
        # constant plateau of beta counts as a single free parameter.
        plateaus = calc_plateaus(beta, self.edges)
        dof_trace[i] = len(plateaus)
        if verbose:
            print('Calculating AIC')
        # Get the negative log-likelihood
        log_likelihood_trace[i] = self.log_likelihood(beta)
        # Calculate AIC = 2k - 2ln(L)
        aic_trace[i] = 2. * dof_trace[i] - 2. * log_likelihood_trace[i]
        # Calculate AICc = AIC + 2k * (k+1) / (n - k - 1)
        aicc_trace[i] = aic_trace[i] + 2 * dof_trace[i] * (dof_trace[i] + 1) / (len(beta) - dof_trace[i] - 1.)
        # Calculate BIC = -2ln(L) + k * (ln(n) - ln(2pi))
        bic_trace[i] = -2 * log_likelihood_trace[i] + dof_trace[i] * (np.log(len(beta)) - np.log(2 * np.pi))
        # Track the best model thus far (lowest BIC wins)
        if best_idx is None or bic_trace[i] < bic_trace[best_idx]:
            best_idx = i
            best_plateaus = plateaus
        # Save the trace of all the resulting parameters
        beta_trace.append(np.array(beta))
        if verbose:
            print('DoF: {0} AIC: {1} AICc: {2} BIC: {3}'.format(dof_trace[i], aic_trace[i], aicc_trace[i], bic_trace[i]))
    if verbose:
        print('Best setting (by BIC): lambda={0} [DoF: {1}, AIC: {2}, AICc: {3} BIC: {4}]'.format(lambda_grid[best_idx], dof_trace[best_idx], aic_trace[best_idx], aicc_trace[best_idx], bic_trace[best_idx]))
    return {'aic': aic_trace, 'aicc': aicc_trace, 'bic': bic_trace, 'dof': dof_trace, 'loglikelihood': log_likelihood_trace, 'beta': np.array(beta_trace), 'lambda': lambda_grid, 'best_idx': best_idx, 'best': beta_trace[best_idx], 'plateaus': best_plateaus}
def logical_or(self, other):
    """Pointwise disjunction: logical_or(t) = self(t) or other(t)."""
    def _int_or(a, b):
        # Preserve the original semantics: int() of Python's short-circuit
        # `or` result, not a boolean coercion of both operands.
        return int(a or b)
    return self.operation(other, _int_or)
def add_context(request):
    """Add variables to all dictionaries passed to templates.

    Django context processor: returns site-wide template variables —
    the president flag, anonymous-session flag, per-type open request
    counts, and admin counters.
    """
    # Whether the user has president privileges
    try:
        PRESIDENT = Manager.objects.filter(incumbent__user=request.user, president=True, ).count() > 0
    except TypeError:
        # NOTE(review): presumably raised when request.user is anonymous or
        # otherwise not filterable -- TODO confirm against Manager model.
        PRESIDENT = False
    # If the user is logged in as the anonymous user, persist that in the session
    if request.user.username == ANONYMOUS_USERNAME:
        request.session["ANONYMOUS_SESSION"] = True
    ANONYMOUS_SESSION = request.session.get("ANONYMOUS_SESSION", False)
    # A list with items of form (RequestType, number_of_open_requests)
    request_types = list()
    if request.user.is_authenticated():
        for request_type in RequestType.objects.filter(enabled=True):
            requests = Request.objects.filter(request_type=request_type, status=Request.OPEN, )
            # Users who do not manage this request type cannot see private
            # requests owned by someone else.
            if not request_type.managers.filter(incumbent__user=request.user):
                requests = requests.exclude(~Q(owner__user=request.user), private=True, )
            request_types.append((request_type, requests.count()))
    profile_requests_count = ProfileRequest.objects.all().count()
    # Currently the admin "unread" badge only counts profile requests.
    admin_unread_count = profile_requests_count
    return {"REQUEST_TYPES": request_types, "HOUSE": settings.HOUSE_NAME, "ANONYMOUS_USERNAME": ANONYMOUS_USERNAME, "SHORT_HOUSE": settings.SHORT_HOUSE_NAME, "ADMIN": settings.ADMINS[0], "NUM_OF_PROFILE_REQUESTS": profile_requests_count, "ADMIN_UNREAD_COUNT": admin_unread_count, "ANONYMOUS_SESSION": ANONYMOUS_SESSION, "PRESIDENT": PRESIDENT, }
def vertices_per_edge(self):
    """Returns an Ex2 array of adjacencies between vertices, where
    each element in the array is a vertex index. Each edge is included
    only once. Edges that are not shared by 2 faces are not included."""
    import numpy as np
    face_pairs = self.f[self.faces_per_edge]
    shared = [vertices_in_common(pair[0], pair[1]) for pair in face_pairs]
    return np.asarray(shared)
def days_since_last_snowfall(self, value=99):
    """Corresponds to IDD Field `days_since_last_snowfall`

    Args:
        value (int): value for IDD Field `days_since_last_snowfall`
            Missing value: 99
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = int(value)
        except (ValueError, TypeError):
            # Bug fix: int() raises TypeError for non-scalar input (e.g. a
            # list), which previously leaked through even though the
            # docstring promises ValueError. Normalize to ValueError.
            raise ValueError('value {} need to be of type int '
                             'for field `days_since_last_snowfall`'.format(value))
    self._days_since_last_snowfall = value
def dict_minus(d, *keys):
    """Return a shallow copy of ``d`` with the given key(s) removed.

    Keys that are not present are silently ignored; the input dict is
    never modified.

    :param d: source mapping (left untouched)
    :param keys: keys to drop from the copy
    :return: new dict without the given keys
    """
    # Shallow copy so the caller's dict is untouched.
    result = dict(d)
    for key in keys:
        # pop() with a default replaces the original bare ``except:``,
        # which swallowed *every* exception rather than just KeyError.
        result.pop(key, None)
    return result
def writeIslandBoxes(filename, catalog, fmt):
    """Write an output file in ds9 .reg, or kvis .ann format that contains bounding boxes for all the islands.

    Parameters
    ----------
    filename : str
        Filename to write.
    catalog : list
        List of sources. Only those of type :class:`AegeanTools.models.IslandSource` will have contours drawn.
    fmt : str
        Output format type. Currently only 'reg' and 'ann' are supported. Default = 'reg'.

    Returns
    -------
    None

    See Also
    --------
    :func:`AegeanTools.catalogs.writeIslandContours`
    """
    if fmt not in ['reg', 'ann']:
        # fmt not supported. Bug fix: the original message ran the format
        # name straight into the text ("boxes{0}").
        log.warning("Format not supported for island boxes {0}".format(fmt))
        return
    # Context manager guarantees the file is closed even if writing fails
    # (the original left the handle open on error).
    with open(filename, 'w') as out:
        print("#Aegean Islands", file=out)
        print("#Aegean version {0}-({1})".format(__version__, __date__), file=out)
        if fmt == 'reg':
            print("IMAGE", file=out)
            box_fmt = 'box({0},{1},{2},{3}) #{4}'
        else:
            print("COORD P", file=out)
            box_fmt = 'box P {0} {1} {2} {3} #{4}'
        for c in catalog:
            # x/y swap for pyfits/numpy translation
            ymin, ymax, xmin, xmax = c.extent
            # +1 for array/image offset
            xcen = (xmin + xmax) / 2.0 + 1
            # +0.5 in each direction to make lines run 'between' DS9 pixels
            xwidth = xmax - xmin + 1
            ycen = (ymin + ymax) / 2.0 + 1
            ywidth = ymax - ymin + 1
            print(box_fmt.format(xcen, ycen, xwidth, ywidth, c.island), file=out)
    return
def _simulated_chain_result(self, potential_chain, already_used_bonus):
    """Simulate any chain reactions.

    Arguments:
        potential_chain: a state to be tested for chain reactions
        already_used_bonus: boolean indicating whether a bonus turn was already
            applied during this action

    Return: final result state or None (if state is filtered out in capture)

    Note that if there is no chain reaction, the final result is the
    same as the original state received.
    """
    while potential_chain:
        # hook for capture game optimizations. no effect in base
        # warning: only do this ONCE for any given state or it will
        # always filter the second time
        if self._disallow_state(potential_chain):
            potential_chain.graft_child(Filtered())
            return None  # no more simulation for this filtered state
        result_board, destroyed_groups = potential_chain.board.execute_once(random_fill=self.random_fill)
        # yield the state if nothing happened during execution (chain done)
        if not destroyed_groups:
            # yield this state as the final result of the chain
            return potential_chain
        # attach the transition
        chain = ChainReaction()
        potential_chain.graft_child(chain)
        # attach the result state
        if already_used_bonus:
            # disallow bonus action if already applied
            bonus_action = 0
        else:
            # allow one bonus action when any destroyed group has 4+ tiles,
            # then flag the bonus as used for the rest of this action
            bonus_action = any(len(group) >= 4 for group in destroyed_groups)
            already_used_bonus = True
        cls = potential_chain.__class__
        chain_result = cls(board=result_board, turn=potential_chain.turn, actions_remaining=potential_chain.actions_remaining + bonus_action, player=potential_chain.player.copy(), opponent=potential_chain.opponent.copy())
        # update the player and opponent
        base_attack = chain_result.active.apply_tile_groups(destroyed_groups)
        chain_result.passive.apply_attack(base_attack)
        chain.graft_child(chain_result)
        # prepare to try for another chain reaction
        potential_chain = chain_result
def sort_canonical(keyword, stmts):
    """Sort all `stmts` in the canonical order defined by `keyword`.

    Return the sorted list. The `stmt` list is not modified.
    If `keyword` does not have a canonical order, the list is returned
    as is.
    """
    try:
        (_arg_type, subspec) = stmt_map[keyword]
    except KeyError:
        # No canonical order is defined for this keyword.
        return stmts
    res = []
    # keep the order of data definition statements and case
    keep = [s[0] for s in data_def_stmts] + ['case']
    for (kw, _spec) in flatten_spec(subspec):
        # keep comments before a statement together with that statement
        comments = []
        for s in stmts:
            if s.keyword == '_comment':
                # Buffer comments until we know what statement follows.
                comments.append(s)
            elif s.keyword == kw and kw not in keep:
                # Emit the buffered comments just before their statement.
                res.extend(comments)
                comments = []
                res.append(s)
            else:
                # Comment run was not attached to a matching statement.
                comments = []
    # then copy all other statements (extensions), preserving their
    # original relative order
    res.extend([stmt for stmt in stmts if stmt not in res])
    return res
def _normalize_slice ( self , key , clamp = False ) :
"""Return a slice equivalent to the input * key * , standardized .""" | if key . start is None :
start = 0
else :
start = ( len ( self ) + key . start ) if key . start < 0 else key . start
if key . stop is None or key . stop == maxsize :
stop = len ( self ) if clamp else None
else :
stop = ( len ( self ) + key . stop ) if key . stop < 0 else key . stop
return slice ( start , stop , key . step or 1 ) |
def get_config():
    """Read the configfile and return config dict.

    Returns
    -------
    dict
        Dictionary with the content of the configpath file.

    Raises
    ------
    IOError
        If the config file does not exist.
    """
    configpath = get_configpath()
    # Fail fast with a clear message when the file is missing.
    if not configpath.exists():
        raise IOError("Config file {} not found.".format(str(configpath)))
    parsed = configparser.ConfigParser()
    parsed.read(str(configpath))
    return parsed
def p_word_list(p):
    '''word_list : WORD
                 | word_list WORD'''
    # NOTE: the docstring above is the PLY grammar rule for this production;
    # its text is functional, not documentation -- do not reword it.
    parserobj = p.context
    if len(p) == 2:
        # Single WORD production: start a fresh list with the expanded word.
        p[0] = [_expandword(parserobj, p.slice[1])]
    else:
        # word_list WORD production: reuse the existing list and append.
        p[0] = p[1]
        p[0].append(_expandword(parserobj, p.slice[2]))
def auto(self, enabled=True, **kwargs):
    """Method to enable or disable automatic capture, allowing you to
    simultaneously set the instance parameters.

    :param enabled: turn automatic capture on (True) or off (False)
    :param kwargs: parameter overrides forwarded to ``self.param.set_param``
    """
    self.namespace = self.get_namespace()
    # Placeholder; overwritten asynchronously by the Javascript round-trip below.
    self.notebook_name = "{notebook}"
    self._timestamp = tuple(time.localtime())
    # Ask the running notebook for its name and push it back into this
    # object via the kernel (there is no synchronous API for this).
    kernel = r'var kernel = IPython.notebook.kernel; '
    nbname = r"var nbname = IPython.notebook.get_notebook_name(); "
    nbcmd = (r"var name_cmd = '%s.notebook_name = \"' + nbname + '\"'; " % self.namespace)
    cmd = (kernel + nbname + nbcmd + "kernel.execute(name_cmd); ")
    display(Javascript(cmd))
    # Give the asynchronous Javascript/kernel round-trip time to complete.
    time.sleep(0.5)
    self._auto = enabled
    self.param.set_param(**kwargs)
    tstamp = time.strftime(" [%Y-%m-%d %H:%M:%S]", self._timestamp)
    print("Automatic capture is now %s.%s" % ('enabled' if enabled else 'disabled', tstamp if enabled else ''))
def eta_mass1_to_mass2(eta, mass1, return_mass_heavier=False, force_real=True):
    """This function takes values for eta and one component mass and returns the
    second component mass. Similar to mchirp_mass1_to_mass2 this requires
    finding the roots of a quadratic equation. Basically:

        eta m2^2 + (2 eta - 1) m1 m2 + eta m1^2 = 0

    This has two solutions which correspond to mass1 being the heavier mass
    or it being the lighter mass. By default the value corresponding to
    mass1 > mass2 is returned. Use the return_mass_heavier kwarg to invert this
    behaviour.
    """
    # Delegate the quadratic-root selection to the shared conversions helper.
    second_mass = conversions.mass_from_knownmass_eta(
        mass1, eta,
        known_is_secondary=return_mass_heavier,
        force_real=force_real)
    return second_mass
def transform(self, X):
    """Add the features calculated using the timeseries_container and add them to the corresponding rows in the input
    pandas.DataFrame X.

    To save some computing time, you should only include those time serieses in the container, that you
    need. You can set the timeseries container with the method :func:`set_timeseries_container`.

    :param X: the DataFrame to which the calculated timeseries features will be added. This is *not* the
        dataframe with the timeseries itself.
    :type X: pandas.DataFrame

    :return: The input DataFrame, but with added features.
    :rtype: pandas.DataFrame

    :raises RuntimeError: if no timeseries container was set beforehand
    """
    if self.timeseries_container is None:
        raise RuntimeError("You have to provide a time series using the set_timeseries_container function before.")
    # Extract only features for the IDs in X.index
    timeseries_container_X = restrict_input_to_index(self.timeseries_container, self.column_id, X.index)
    # All extraction settings are simply forwarded from the instance config.
    extracted_features = extract_features(timeseries_container_X, default_fc_parameters=self.default_fc_parameters, kind_to_fc_parameters=self.kind_to_fc_parameters, column_id=self.column_id, column_sort=self.column_sort, column_kind=self.column_kind, column_value=self.column_value, chunksize=self.chunksize, n_jobs=self.n_jobs, show_warnings=self.show_warnings, disable_progressbar=self.disable_progressbar, impute_function=self.impute_function, profile=self.profile, profiling_filename=self.profiling_filename, profiling_sorting=self.profiling_sorting)
    # Left join keeps every row of X, even those with no extracted features.
    X = pd.merge(X, extracted_features, left_index=True, right_index=True, how="left")
    return X
def _encode_char ( char , charmap , defaultchar ) :
"""Encode a single character with the given encoding map
: param char : char to encode
: param charmap : dictionary for mapping characters in this code page""" | if ord ( char ) < 128 :
return ord ( char )
if char in charmap :
return charmap [ char ]
return ord ( defaultchar ) |
def cv_precompute(self, mask, b):
    '''Pre-compute the matrices :py:obj:`A` and :py:obj:`B`
    (cross-validation step only) for chunk :py:obj:`b`.

    :param mask: indices (within the chunk) held out as the validation set
    :param b: index of the light-curve chunk to process
    :return: tuple ``(A, B, C, mK, f, m1, m2)`` -- per-PLD-order design
        products ``A``/``B``, transit-model covariance contribution ``C``,
        masked covariance ``mK``, masked median-subtracted flux ``f``, and
        the chunk/training index masks ``m1``/``m2``
    '''
    # Get current chunk and mask outliers
    m1 = self.get_masked_chunk(b)
    flux = self.fraw[m1]
    K = GetCovariance(self.kernel, self.kernel_params, self.time[m1], self.fraw_err[m1])
    med = np.nanmedian(flux)
    # Now mask the validation set
    M = lambda x, axis=0: np.delete(x, mask, axis=axis)
    m2 = M(m1)
    # Remove the validation rows AND columns from the covariance.
    mK = M(M(K, axis=0), axis=1)
    f = M(flux) - med
    # Pre-compute the matrices
    A = [None for i in range(self.pld_order)]
    B = [None for i in range(self.pld_order)]
    for n in range(self.pld_order):
        # Only compute up to the current PLD order
        if self.lam_idx >= n:
            X2 = self.X(n, m2)
            X1 = self.X(n, m1)
            A[n] = np.dot(X2, X2.T)
            B[n] = np.dot(X1, X2.T)
            # Free the (large) design matrices as soon as possible.
            del X1, X2
    if self.transit_model is None:
        C = 0
    else:
        # Accumulate the covariance contribution of each transit model,
        # after removing the mean transit signal from the flux.
        C = np.zeros((len(m2), len(m2)))
        mean_transit_model = med * np.sum([tm.depth * tm(self.time[m2]) for tm in self.transit_model], axis=0)
        f -= mean_transit_model
        for tm in self.transit_model:
            X2 = tm(self.time[m2]).reshape(-1, 1)
            C += tm.var_depth * np.dot(X2, X2.T)
            del X2
    return A, B, C, mK, f, m1, m2
def remove_from_products(self, products=None, all_products=False):
    """Remove user group from some product license configuration groups (PLCs), or all of them.

    :param products: list of product names the user group should be removed from
    :param all_products: a boolean meaning remove from all (don't specify products in this case)
    :return: the Group, so you can do Group(...).remove_from_products(...).add_users(...)
    :raises ArgumentError: if both or neither of products/all_products are given
    """
    if all_products:
        if products:
            raise ArgumentError("When removing from all products, do not specify specific products")
        plist = "all"
    else:
        if not products:
            raise ArgumentError("You must specify products from which to remove the user group")
        # list() replaces the original pass-through comprehension and also
        # accepts any iterable of product names, not just lists.
        plist = {GroupTypes.productConfiguration.name: list(products)}
    return self.append(remove=plist)
def mysql_to_dict(data, key):
    '''Convert MySQL-style output to a python dictionary

    :param data: iterable of output lines from a MySQL table dump
        (header row, ``+---+`` separators, then ``| a | b |`` data rows)
    :param key: column name whose value indexes each row in the result
    :return: dict mapping each row's ``key`` value to a dict of that
        row's columns
    '''
    ret = {}
    headers = ['']
    for line in data:
        if not line:
            continue
        if line.startswith('+'):
            # Separator rows (+---+---+) carry no data.
            continue
        comps = [comp.strip() for comp in line.split('|')]
        if len(headers) > 1:
            index = len(headers) - 1
            # Columns 0 and `index` are the empty strings produced by the
            # leading/trailing '|' delimiters, so only 1..index-1 hold data
            # (same range as the original index-loop with its `continue`).
            row = {headers[field]: salt.utils.stringutils.to_num(comps[field])
                   for field in range(1, index)}
            ret[row[key]] = row
        else:
            # First non-separator line defines the column headers.
            headers = comps
    return ret
def simple_cmd():
    """``Deprecated``: Not better than ``fire`` -> pip install fire

    Parses ``-f/--func_name``, ``-a/--args`` and ``-k/--kwargs`` from the
    command line and calls the named module-level function with them.
    ``--info``/``--show`` prints the parsed arguments and the function's
    source instead of calling it.
    """
    parser = argparse.ArgumentParser(prog="Simple command-line function toolkit.", description="""Input function name and args and kwargs.
python xxx.py main -a 1 2 3 -k a=1,b=2,c=3""", )
    parser.add_argument("-f", "--func_name", default="main")
    parser.add_argument("-a", "--args", dest="args", nargs="*")
    parser.add_argument("-k", "--kwargs", dest="kwargs")
    parser.add_argument("-i", "-s", "--info", "--show", "--status", dest="show", action="store_true", help="show the args, kwargs and function's source code.", )
    params = parser.parse_args()
    func_name = params.func_name
    # Resolve the target from module globals; warn and bail on unknown names.
    func = globals().get(func_name)
    if not (callable(func)):
        Config.utils_logger.warning("invalid func_name: %s" % func_name)
        return
    args = params.args or []
    kwargs = params.kwargs or {}
    if kwargs:
        # "a=1,b=2" / "a:1;b:2" -> {"a": "1", "b": "2"}; note the values
        # stay strings -- no type coercion is performed.
        items = [re.split("[:=]", i) for i in re.split("[,;]+", kwargs)]
        kwargs = dict(items)
    if params.show:
        # Informational mode: dump args/kwargs and the function source only.
        from inspect import getsource
        Config.utils_logger.info("args: %s; kwargs: %s" % (args, kwargs))
        Config.utils_logger.info(getsource(func))
        return
    func(*args, **kwargs)
def update_subscription(request, ident):
    """Shows subscription options for a verified subscriber.

    GET renders the update form; POST either unsubscribes (deleting the
    subscription) or saves the changes and redirects back to the same URL.
    An unknown ``ident`` renders the invalid-token page.
    """
    try:
        subscription = Subscription.objects.get(ident=ident)
    except Subscription.DoesNotExist:
        return respond('overseer/invalid_subscription_token.html', {}, request)
    # Bug fix: ``if request.POST:`` is falsy for a POST with an empty body,
    # which silently re-rendered a fresh form. Check the HTTP method instead.
    if request.method == 'POST':
        form = UpdateSubscriptionForm(request.POST, instance=subscription)
        if form.is_valid():
            if form.cleaned_data['unsubscribe']:
                subscription.delete()
                # NOTE(review): unlike the other respond() calls here, this
                # one omits ``request`` -- confirm respond()'s signature
                # permits that.
                return respond('overseer/unsubscribe_confirmed.html', {'email': subscription.email, })
            else:
                form.save()
                # Redirect after a successful POST to avoid re-submission.
                return HttpResponseRedirect(request.get_full_path())
    else:
        form = UpdateSubscriptionForm(instance=subscription)
    context = csrf(request)
    context.update({'form': form, 'subscription': subscription, 'service_list': Service.objects.all(), })
    return respond('overseer/update_subscription.html', context, request)
def _dump_model(model, attrs=None):
    """Print a one-line dump of the model's field values for debugging.

    :param model: model instance whose fields (and m2m relations) are printed
    :param attrs: optional extra attribute names to include in the dump
    """
    # Concrete fields first.
    pairs = [(field.name, str(getattr(model, field.name)))
             for field in model._meta.fields]
    # Then any explicitly requested extra attributes.
    if attrs is not None:
        pairs.extend((attr, str(getattr(model, attr))) for attr in attrs)
    # Finally the many-to-many relations, shown as "a, b, c (count)".
    for field in model._meta.many_to_many:
        manager = getattr(model, field.name)
        joined = ', '.join(map(str, manager.all()))
        pairs.append((field.name, '{val} ({count})'.format(val=joined, count=manager.count())))
    print(', '.join('{0}={1}'.format(name, value) for name, value in pairs))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.