signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def dense(*elements):
    """Create a dense vector of 64-bit floats from a Python list or numbers.

    >>> Vectors.dense([1, 2, 3])
    DenseVector([1.0, 2.0, 3.0])
    >>> Vectors.dense(1.0, 2.0)
    DenseVector([1.0, 2.0])
    """
    # A single non-numeric argument is taken to be the whole sequence of
    # values (list, numpy.array or other iterable), not a 1-element vector.
    if len(elements) == 1:
        sole = elements[0]
        if not isinstance(sole, (float, int, long)):
            elements = sole
    return DenseVector(elements)
def do_break(self, arg, temporary=0):
    """b(reak) [([filename:]lineno | function) [, condition]]

    Without argument, list all breaks.

    With a line number argument, set a break at this line in the
    current file.  With a function name, set a break at the first
    executable line of that function.  If a second argument is
    present, it is a string specifying an expression which must
    evaluate to true before the breakpoint is honored.

    The line number may be prefixed with a filename and a colon,
    to specify a breakpoint in another file (probably one that
    hasn't been loaded yet).  The file is searched for on
    sys.path; the .py suffix may be omitted.
    """
    if not arg:
        # No argument: list all known breakpoints instead of setting one.
        all_breaks = '\n'.join(bp.bpformat()
                               for bp in bdb.Breakpoint.bpbynumber if bp)
        if all_breaks:
            self.message("Num Type Disp Enb Where")
            self.message(all_breaks)
        return
    # Parse arguments, comma has lowest precedence and cannot occur in
    # filename.
    args = arg.rsplit(',', 1)
    cond = args[1].strip() if len(args) == 2 else None
    # Parse stuff before comma: [filename:]lineno | function.
    args = args[0].rsplit(':', 1)
    name = args[0].strip()
    lineno = args[1] if len(args) == 2 else args[0]
    try:
        lineno = int(lineno)
    except ValueError:
        if len(args) == 2:
            # A colon was present, so the text after it must be a line number.
            self.error('Bad lineno: "{}".'.format(lineno))
        else:
            # Attempt the list of possible function or method fully
            # qualified names and corresponding filenames.
            candidates = get_fqn_fname(name, self.curframe)
            for fqn, fname in candidates:
                try:
                    bp = self.set_break(fname, None, temporary, cond, fqn)
                    self.message('Breakpoint {:d} at {}:{:d}'.format(
                        bp.number, bp.file, bp.line))
                    return
                except bdb.BdbError:
                    # Try the next candidate; errors are reported only when
                    # every candidate has failed.
                    pass
            if not candidates:
                self.error('Not a function or a built-in: "{}"'.format(name))
            else:
                self.error('Bad name: "{}".'.format(name))
    else:
        # Plain line number: resolve which file the breakpoint belongs to.
        filename = self.curframe.f_code.co_filename
        if len(args) == 2 and name:
            filename = name
            if filename.startswith('<') and filename.endswith('>'):
                # allow <doctest name>: doctest installs a hook at
                # linecache.getlines to allow <doctest name> to be
                # linecached and readable.
                if filename == '<string>' and self.mainpyfile:
                    filename = self.mainpyfile
            else:
                root, ext = os.path.splitext(filename)
                if ext == '':
                    filename = filename + '.py'
            if not os.path.exists(filename):
                self.error('Bad filename: "{}".'.format(arg))
                return
        try:
            bp = self.set_break(filename, lineno, temporary, cond)
        except bdb.BdbError as err:
            self.error(err)
        else:
            self.message('Breakpoint {:d} at {}:{:d}'.format(
                bp.number, bp.file, bp.line))
def MessageToRepr(msg, multiline=False, **kwargs):
    """Return a repr-style string for a protorpc message.

    protorpc.Message.__repr__ does not return anything that could be
    considered python code. Adding this function lets us print a protorpc
    message in such a way that it could be pasted into code later, and used
    to compare against other things.

    Args:
        msg: protorpc.Message, the message to be repr'd.
        multiline: bool, True if the returned string should have each field
            assignment on its own line.
        **kwargs: {str:str}, Additional flags for how to format the string.

    Known **kwargs:
        shortstrings: bool, True if all string values should be
            truncated at 100 characters, since when mocking the contents
            typically don't matter except for IDs, and IDs are usually
            less than 100 characters.
        no_modules: bool, True if the long module name should not be printed
            with each type.

    Returns:
        str, A string of valid python (assuming the right imports have been
        made) that recreates the message passed into this function.
    """
    # TODO(jasmuth): craigcitro suggests a pretty-printer from apitools/gen.
    indent = kwargs.get('indent', 0)

    def IndentKwargs(kwargs):
        # Copy kwargs with the indent level bumped one step (4 spaces) for
        # recursive calls.
        kwargs = dict(kwargs)
        kwargs['indent'] = kwargs.get('indent', 0) + 4
        return kwargs

    if isinstance(msg, list):
        # Lists are rendered element by element, recursing for each item.
        s = '['
        for item in msg:
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            s += MessageToRepr(
                item, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ']'
        return s
    if isinstance(msg, messages.Message):
        # Messages render as ClassName(field=..., ...) with fields sorted by
        # name, each value produced by a recursive call.
        s = type(msg).__name__ + '('
        if not kwargs.get('no_modules'):
            s = msg.__module__ + '.' + s
        names = sorted([field.name for field in msg.all_fields()])
        for name in names:
            field = msg.field_by_name(name)
            if multiline:
                s += '\n' + ' ' * (indent + 4)
            value = getattr(msg, field.name)
            s += field.name + '=' + MessageToRepr(
                value, multiline=multiline, **IndentKwargs(kwargs)) + ','
        if multiline:
            s += '\n' + ' ' * indent
        s += ')'
        return s
    if isinstance(msg, six.string_types):
        if kwargs.get('shortstrings') and len(msg) > 100:
            msg = msg[:100]
    if isinstance(msg, datetime.datetime):
        # datetimes are rebuilt with a tzinfo whose repr is valid python
        # (apitools TimeZoneOffset) instead of the default tzinfo repr.

        class SpecialTZInfo(datetime.tzinfo):

            def __init__(self, offset):
                super(SpecialTZInfo, self).__init__()
                self.offset = offset

            def __repr__(self):
                s = 'TimeZoneOffset(' + repr(self.offset) + ')'
                if not kwargs.get('no_modules'):
                    s = 'apitools.base.protorpclite.util.' + s
                return s

        msg = datetime.datetime(
            msg.year, msg.month, msg.day, msg.hour, msg.minute, msg.second,
            msg.microsecond, SpecialTZInfo(msg.tzinfo.utcoffset(0)))
    # Everything else (including the possibly-truncated string and the
    # rebuilt datetime) falls through to the built-in repr.
    return repr(msg)
def parse_stats(self, soup):
    """Parse a media list's statistics block.

    Given:
        soup: a bs4 element containing the current media list's stats.

    Return a dict of this media list's stats.  Count-like fields that fail
    to parse are stored as 0 under their *normalized* key (the original
    code stored the fallback under the raw key, e.g. ``onhold`` instead of
    ``on_hold``, so callers could see a missing key on bad input).
    """
    def _to_int(text):
        # MAL sometimes emits empty or non-numeric counts; treat those as 0.
        try:
            return int(text)
        except ValueError:
            return 0

    stats = {}
    for row in soup.children:
        try:
            key = row.name.replace(u'user_', u'')
            if key == u'id':
                stats[key] = int(row.text)
            elif key == u'name':
                stats[key] = row.text
            elif key == self.verb + u'ing':
                stats[key] = _to_int(row.text)
            elif key == u'completed':
                stats[key] = _to_int(row.text)
            elif key == u'onhold':
                # Stored under a normalized snake_case key.
                stats[u'on_hold'] = _to_int(row.text)
            elif key == u'dropped':
                stats[key] = _to_int(row.text)
            elif key == u'planto' + self.verb:
                stats[u'plan_to_' + self.verb] = _to_int(row.text)
            elif key == u'days_spent_watching':
                # for some reason, MAL doesn't substitute 'read' in for manga
                # for the verb here
                try:
                    stats[u'days_spent'] = decimal.Decimal(row.text)
                except decimal.InvalidOperation:
                    stats[u'days_spent'] = decimal.Decimal(0)
        except Exception:
            # Best-effort parse: swallow per-row failures unless the session
            # asked for parse exceptions to surface.
            if not self.session.suppress_parse_exceptions:
                raise
    return stats
def features(self):
    """Return the flat list of features collected from ``self.props``."""
    feats = []
    for inter in self.props.values():
        if isinstance(inter, tuple):
            first, second = inter[0], inter[1]
            # A pair of equal "=" features collapses to a single entry.
            collapsible = (
                first and second
                and first.getValue() == second.getValue()
                and first.operator == "="
                and second.operator == "="
            )
            if collapsible:
                feats.append(first)
            else:
                feats.extend(f for f in inter if f)
        elif isinstance(inter, dict):
            feats.extend(inter.values())
        elif isinstance(inter, list):
            feats.extend(inter)
        else:
            feats.append(inter)
    return feats
async def grant(self, username, acl='login'):
    """Grant the given access level to a user on this controller.

    If the user already holds higher permissions than the requested ACL,
    this is a no-op (see ``revoke`` to remove permissions).

    :param str username: Username
    :param str acl: Access control ('login', 'add-model' or 'superuser')
    :returns: True if new access was granted, False if user already had
        requested access or greater.  Raises JujuError if failed.
    """
    facade = client.ControllerFacade.from_connection(self.connection())
    request = client.ModifyControllerAccess(acl, 'grant', tag.user(username))
    try:
        await facade.ModifyControllerAccess([request])
    except errors.JujuError as err:
        # The controller signals "already has equal-or-higher access" via a
        # JujuError message; treat that case as a successful no-op.
        if 'user already has' not in str(err):
            raise
        return False
    return True
def render(self, trajectories: Tuple[NonFluents, Fluents, Fluents, Fluents, np.array], batch: Optional[int] = None) -> None:
    '''Renders the simulated `trajectories` for the given `batch`.

    Abstract hook: concrete visualizers must override this method.

    Args:
        trajectories: NonFluents, states, actions, interms and rewards.
        batch: Number of batches to render.

    Raises:
        NotImplementedError: always; this base implementation is abstract.
    '''
    raise NotImplementedError
def set_access_port(self, port_number, vlan_id):
    """Sets the specified port as an ACCESS port.

    Coroutine (uses ``yield from`` to await the hypervisor command).

    :param port_number: allocated port number
    :param vlan_id: VLAN number membership
    :raises DynamipsError: if the port has not been allocated
    """
    if port_number not in self._nios:
        raise DynamipsError("Port {} is not allocated".format(port_number))
    nio = self._nios[port_number]
    # Ask the Dynamips hypervisor to reconfigure the port, then record the
    # mapping locally so it can be queried/serialized later.
    yield from self._hypervisor.send(
        'ethsw set_access_port "{name}" {nio} {vlan_id}'.format(
            name=self._name, nio=nio, vlan_id=vlan_id))
    log.info(
        'Ethernet switch "{name}" [{id}]: port {port} set as an access port in VLAN {vlan_id}'.format(
            name=self._name, id=self._id, port=port_number, vlan_id=vlan_id))
    self._mappings[port_number] = ("access", vlan_id)
def thumbnail(parser, token):
    """Creates a thumbnail of for an ImageField.

    To just output the absolute url to the thumbnail::

        {% thumbnail image 80x80 %}

    After the image path and dimensions, you can put any options::

        {% thumbnail image 80x80 force_ssl=True %}

    To put the thumbnail URL on the context instead of just rendering
    it, finish the tag with ``as [context_var_name]``::

        {% thumbnail image 80x80 as thumb %}
        <img src="{{ thumb }}" />
    """
    args = token.split_contents()
    tag = args[0]
    # Check to see if we're setting to a context variable.
    if len(args) > 4 and args[-2] == 'as':
        context_name = args[-1]
        args = args[:-2]
    else:
        context_name = None
    if len(args) < 3:
        raise TemplateSyntaxError(
            "Invalid syntax. Expected "
            "'{%% %s source size [option1 option2 ...] %%}' or "
            "'{%% %s source size [option1 option2 ...] as variable %%}'" % (tag, tag))
    # Get the source image path and requested size.
    source_var = args[1]
    # If the size argument was a correct static format, wrap it in quotes so
    # that it is compiled correctly.
    m = REGEXP_THUMB_SIZES.match(args[2])
    if m:
        args[2] = '"%s"' % args[2]
    size_var = args[2]
    # Get the options.
    args_list = split_args(args[3:]).items()
    # Check the options.
    # NOTE(review): opts is never populated here but is still passed to
    # ThumbnailNode -- presumably a vestige of an older option scheme;
    # confirm against ThumbnailNode before removing.
    opts = {}
    kwargs = {}
    # key, values here override settings and defaults
    for arg, value in args_list:
        value = value and parser.compile_filter(value)
        if arg in TAG_SETTINGS and value is not None:
            kwargs[str(arg)] = value
            continue
        else:
            raise TemplateSyntaxError(
                "'%s' tag received a bad argument: "
                "'%s'" % (tag, arg))
    return ThumbnailNode(source_var, size_var, opts=opts, context_name=context_name, **kwargs)
def create_summary_tear_sheet(factor_data, long_short=True, group_neutral=False):
    """Creates a small summary tear sheet with returns, information, and
    turnover analysis.

    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),
        containing the values for a single alpha factor, forward returns for
        each period, the factor quantile/bin that factor value belongs to, and
        (optionally) the group the asset belongs to.
        - See full explanation in utils.get_clean_factor_and_forward_returns
    long_short : bool
        Should this computation happen on a long short portfolio? if so, then
        mean quantile returns will be demeaned across the factor universe.
    group_neutral : bool
        Should this computation happen on a group neutral portfolio? if so,
        returns demeaning will occur on the group level.
    """
    # Returns Analysis
    mean_quant_ret, std_quantile = perf.mean_return_by_quantile(
        factor_data, by_group=False, demeaned=long_short,
        group_adjust=group_neutral)
    # Convert period returns to one-period rates so periods are comparable.
    mean_quant_rateret = mean_quant_ret.apply(
        utils.rate_of_return, axis=0, base_period=mean_quant_ret.columns[0])
    mean_quant_ret_bydate, std_quant_daily = perf.mean_return_by_quantile(
        factor_data, by_date=True, by_group=False, demeaned=long_short,
        group_adjust=group_neutral)
    mean_quant_rateret_bydate = mean_quant_ret_bydate.apply(
        utils.rate_of_return, axis=0,
        base_period=mean_quant_ret_bydate.columns[0])
    compstd_quant_daily = std_quant_daily.apply(
        utils.std_conversion, axis=0, base_period=std_quant_daily.columns[0])
    alpha_beta = perf.factor_alpha_beta(
        factor_data, demeaned=long_short, group_adjust=group_neutral)
    # Spread between top and bottom factor quantiles.
    mean_ret_spread_quant, std_spread_quant = perf.compute_mean_returns_spread(
        mean_quant_rateret_bydate,
        factor_data['factor_quantile'].max(),
        factor_data['factor_quantile'].min(),
        std_err=compstd_quant_daily)
    periods = utils.get_forward_returns_columns(factor_data.columns)
    fr_cols = len(periods)
    # 2 fixed sections plus 3 plots per forward-return period.
    vertical_sections = 2 + fr_cols * 3
    gf = GridFigure(rows=vertical_sections, cols=1)
    plotting.plot_quantile_statistics_table(factor_data)
    plotting.plot_returns_table(
        alpha_beta, mean_quant_rateret, mean_ret_spread_quant)
    plotting.plot_quantile_returns_bar(
        mean_quant_rateret, by_group=False, ylim_percentiles=None,
        ax=gf.next_row())
    # Information Analysis
    ic = perf.factor_information_coefficient(factor_data)
    plotting.plot_information_table(ic)
    # Turnover Analysis
    quantile_factor = factor_data['factor_quantile']
    quantile_turnover = {
        p: pd.concat(
            [perf.quantile_turnover(quantile_factor, q, p)
             for q in range(1, int(quantile_factor.max()) + 1)],
            axis=1)
        for p in periods}
    autocorrelation = pd.concat(
        [perf.factor_rank_autocorrelation(factor_data, period)
         for period in periods],
        axis=1)
    plotting.plot_turnover_table(autocorrelation, quantile_turnover)
    plt.show()
    gf.close()
def _read_s3_config(self):
    """Read in the value of the configuration file in Amazon S3.

    :rtype: str
    :raises ValueError: if boto3 is not installed or the download fails
    """
    try:
        import boto3
        import botocore.exceptions
    except ImportError:
        boto3, botocore = None, None
    if not boto3:
        raise ValueError(
            's3 URL specified for configuration but boto3 not installed')
    parsed = parse.urlparse(self._file_path)
    try:
        # S3_ENDPOINT allows pointing at a non-AWS, S3-compatible service.
        response = boto3.client(
            's3', endpoint_url=os.environ.get('S3_ENDPOINT')).get_object(
                Bucket=parsed.netloc, Key=parsed.path.lstrip('/'))
    except botocore.exceptions.ClientError as e:
        # Chain the original ClientError so the root cause is preserved
        # in the traceback instead of being flattened into a string.
        raise ValueError(
            'Failed to download configuration from S3: {}'.format(e)) from e
    return response['Body'].read().decode('utf-8')
def delete_data_source(self, data_source):
    """Delete a data source, and the folder associated with it.

    data_source = {'imap': {'name': 'data-source-name'}}
    or
    data_source = {'pop3': {'id': 'data-source-id'}}

    NOTE(review): despite the docstring advertising name-or-id, this body
    indexes data_source[source_type]['id'] directly -- presumably a caller
    or an earlier step resolves names to ids; confirm before relying on
    the name form.
    """
    # The data source dict has exactly one key: its type (imap/pop3/...).
    source_type = [k for k in data_source.keys()][0]
    # Look up the full source definition to find its folder ('l' attribute),
    # which must be deleted alongside the source itself.
    complete_source = self.get_data_sources(source_id=data_source[source_type]['id'])
    folder_id = complete_source[source_type][0]['l']
    self.delete_folders(folder_ids=[folder_id])
    return self.request('DeleteDataSource', data_source)
def step(self, **args):
    """Network.step()

    Does a single step.  Calls propagate(), backprop(), and
    change_weights() if learning is set.

    Format for parameters: <layer name> = <activation/target list>

    Hook methods (preStep, postPropagate, preBackprop, postBackprop,
    postStep) may each return a replacement ``args`` dict; a falsy return
    leaves the current args unchanged.

    Returns a tuple ``(error, correct, total, pcorrect)`` from backprop().
    """
    if self.verbosity > 0:
        print("Network.step() called with:", args)
    # First, copy the values into either activations or targets:
    retargs = self.preStep(**args)
    if retargs:
        # replace the args
        args = retargs
    # Propagate activation through network:
    self.propagate(**args)
    retargs = self.postPropagate(**args)
    if retargs:
        # replace the args
        args = retargs
    # Next, take care of any Auto-association, and copy
    # activations to targets
    for aa in self.association:
        (inName, outName) = aa
        inLayer = self.getLayer(inName)
        if inLayer.type not in ['Input', "Hidden"]:
            raise LayerError('Associated input layer not type \'Input\' or \'Hidden\'.', inLayer.type)
        outLayer = self.getLayer(outName)
        if not outLayer.type == 'Output':
            raise LayerError('Associated output layer not type \'Output\'.', outLayer.type)
        outLayer.copyTargets(inLayer.activation)
    # Compute error, and back prop it:
    retargs = self.preBackprop(**args)
    if retargs:
        # replace the args
        args = retargs
    # compute_error()
    (error, correct, total, pcorrect) = self.backprop(**args)
    if self.verbosity > 2 or self.interactive:
        self.display()
        if self.interactive:
            self.prompt()
    retargs = self.postBackprop(**args)
    if retargs:
        # replace the args
        args = retargs
    # if learning is true, and need to update weights here:
    if self.learning and not self.batch:
        # else change weights in sweep
        self.change_weights()
    retargs = self.postStep(**args)
    if retargs:
        # replace the args
        args = retargs
    self.reportPattern()
    return (error, correct, total, pcorrect)
def ignore_proxy_host(self):
    """Return True when ``self.host`` is covered by the $no_proxy list."""
    # Platform/registry bypass rules take precedence.
    if urllib.proxy_bypass(self.host):
        return True
    raw = os.environ.get("no_proxy")
    if not raw:
        return False
    # The variable is a comma-separated list of host[:port] entries.
    return any(
        host.lower() == self.host and port == self.port
        for host, port in (parse_host_port(entry) for entry in raw.split(","))
    )
def update_user(self, user_name, new_user_name=None, new_path=None):
    """Updates name and/or path of the specified user.

    :type user_name: string
    :param user_name: The name of the user

    :type new_user_name: string
    :param new_user_name: If provided, the username of the user will be
        changed to this username.

    :type new_path: string
    :param new_path: If provided, the path of the user will be
        changed to this path.
    """
    params = {'UserName': user_name}
    # Only truthy values are forwarded, matching the IAM API's optional
    # parameter handling.
    for key, value in (('NewUserName', new_user_name), ('NewPath', new_path)):
        if value:
            params[key] = value
    return self.get_response('UpdateUser', params)
def compileInterpolatableTTFsFromDS(
    designSpaceDoc,
    preProcessorClass=TTFInterpolatablePreProcessor,
    outlineCompilerClass=OutlineTTFCompiler,
    featureCompilerClass=None,
    featureWriters=None,
    glyphOrder=None,
    useProductionNames=None,
    cubicConversionError=None,
    reverseDirection=True,
    inplace=False,
):
    """Create FontTools TrueType fonts from the DesignSpaceDocument UFO sources
    with interpolatable outlines.  Cubic curves are converted compatibly to
    quadratic curves using the Cu2Qu conversion algorithm.

    If the Designspace contains a "public.skipExportGlyphs" lib key, these
    glyphs will not be exported to the final font.  If these glyphs are used
    as components in any other glyph, those components get decomposed.  If
    the lib key doesn't exist in the Designspace, all glyphs are exported
    (keys in individual UFOs are ignored).  UFO groups and kerning will be
    pruned of skipped glyphs.

    The DesignSpaceDocument should contain SourceDescriptor objects with
    'font' attribute set to an already loaded defcon.Font object (or
    compatible UFO Font class).  If 'font' attribute is unset or None, an
    AttributeError exception is thrown.

    Return a copy of the DesignSpaceDocument object (or the same one if
    inplace=True) with the source's 'font' attribute set to the
    corresponding TTFont instance.

    For sources that have the 'layerName' attribute defined, the
    corresponding TTFont object will contain only a minimum set of tables
    ("head", "hmtx", "glyf", "loca", "maxp", "post" and "vmtx"), and no
    OpenType layout tables.
    """
    ufos, layerNames = [], []
    for source in designSpaceDoc.sources:
        if source.font is None:
            raise AttributeError(
                "designspace source '%s' is missing required 'font' attribute"
                % getattr(source, "name", "<Unknown>"))
        ufos.append(source.font)
        # 'layerName' is None for the default layer
        layerNames.append(source.layerName)
    skipExportGlyphs = designSpaceDoc.lib.get("public.skipExportGlyphs", [])
    ttfs = compileInterpolatableTTFs(
        ufos,
        preProcessorClass=preProcessorClass,
        outlineCompilerClass=outlineCompilerClass,
        featureCompilerClass=featureCompilerClass,
        featureWriters=featureWriters,
        glyphOrder=glyphOrder,
        useProductionNames=useProductionNames,
        cubicConversionError=cubicConversionError,
        reverseDirection=reverseDirection,
        inplace=inplace,
        layerNames=layerNames,
        skipExportGlyphs=skipExportGlyphs,
    )
    if inplace:
        result = designSpaceDoc
    else:
        # TODO try a more efficient copy method that doesn't involve
        # (de)serializing
        result = designSpaceDoc.__class__.fromstring(designSpaceDoc.tostring())
    # Attach each compiled TTFont back onto its source descriptor.
    for source, ttf in zip(result.sources, ttfs):
        source.font = ttf
    return result
def syllable_split(string):
    """Split ``string`` into (stressed) syllables and punctuation/whitespace."""
    # Build the alternation from the character class A: a '-prefixed run,
    # a `-prefixed run, a plain run, a run of other non-dot characters, or
    # any single character.  ({{1}} renders as the regex quantifier {1}.)
    pattern = r"\'[{0}]+|`[{0}]+|[{0}]+|[^{0}\'`\.]+|[^\.]{{1}}".format(A)
    return re.findall(pattern, string, flags=FLAGS)
def get_current_url(environ, root_only=False, strip_querystring=False, host_only=False):
    """Recreate the full URL (or a part of it) for the current request.

    >>> from werkzeug import create_environ
    >>> env = create_environ("/?param=foo", "http://localhost/script")
    >>> get_current_url(env)
    'http://localhost/script/?param=foo'
    >>> get_current_url(env, root_only=True)
    'http://localhost/script/'
    >>> get_current_url(env, host_only=True)
    'http://localhost/'
    >>> get_current_url(env, strip_querystring=True)
    'http://localhost/script/'

    :param environ: the WSGI environment to get the current URL from.
    :param root_only: set `True` if you only want the root URL.
    :param strip_querystring: set to `True` if you don't want the querystring.
    :param host_only: set to `True` if the host URL should be returned.
    """
    parts = [environ["wsgi.url_scheme"], "://", get_host(environ)]
    if host_only:
        return "".join(parts) + "/"
    parts.append(quote(environ.get("SCRIPT_NAME", "").rstrip("/")))
    if root_only:
        parts.append("/")
    else:
        parts.append(quote("/" + environ.get("PATH_INFO", "").lstrip("/")))
        if not strip_querystring:
            qs = environ.get("QUERY_STRING")
            if qs:
                parts.append("?" + qs)
    return "".join(parts)
def confirmation_pdf(self, confirmation_id):
    """Opens a pdf of a confirmation.

    :param confirmation_id: the confirmation id
    :return: dict
    """
    return self._create_get_request(
        resource=CONFIRMATIONS,
        billomat_id=confirmation_id,
        command=PDF,
    )
def render_source(self, source, variables=None):
    """Render ``source`` as a template with the passed variables."""
    context = {} if variables is None else variables
    return self._engine.from_string(source).render(**context)
def install_optimal_reactor(verbose=False):
    """Try to install the optimal Twisted reactor for platform.

    Must be called before the reactor is imported/started anywhere else;
    failures fall back to printing a warning and leaving the default.

    :param verbose: If ``True``, print what happens.
    :type verbose: bool
    """
    import sys
    from twisted.python import reflect

    # determine currently installed reactor, if any
    if 'twisted.internet.reactor' in sys.modules:
        current_reactor = reflect.qual(
            sys.modules['twisted.internet.reactor'].__class__).split('.')[-1]
    else:
        current_reactor = None

    # depending on platform, install optimal reactor
    if 'bsd' in sys.platform or sys.platform.startswith('darwin'):
        # *BSD and MacOSX: kqueue reactor
        if current_reactor != 'KQueueReactor':
            try:
                v = sys.version_info
                # kqueue support requires Python >= 2.6.5
                if v[0] == 1 or (v[0] == 2 and v[1] < 6) or (v[0] == 2 and v[1] == 6 and v[2] < 5):
                    raise Exception("Python version too old ({0}) to use kqueue reactor".format(sys.version))
                from twisted.internet import kqreactor
                kqreactor.install()
            except Exception as e:
                print("WARNING: Running on *BSD or MacOSX, but cannot install kqueue Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    print("Running on *BSD or MacOSX and optimal reactor (kqueue) was installed.")
        else:
            if verbose:
                print("Running on *BSD or MacOSX and optimal reactor (kqueue) already installed.")
    elif sys.platform in ['win32']:
        # Windows: IOCP reactor
        if current_reactor != 'IOCPReactor':
            try:
                from twisted.internet.iocpreactor import reactor as iocpreactor
                iocpreactor.install()
            except Exception as e:
                print("WARNING: Running on Windows, but cannot install IOCP Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    # bug fix: message previously misspelled IOCP as "ICOP"
                    print("Running on Windows and optimal reactor (IOCP) was installed.")
        else:
            if verbose:
                print("Running on Windows and optimal reactor (IOCP) already installed.")
    elif sys.platform.startswith('linux'):
        # Linux: epoll reactor
        if current_reactor != 'EPollReactor':
            try:
                from twisted.internet import epollreactor
                epollreactor.install()
            except Exception as e:
                print("WARNING: Running on Linux, but cannot install Epoll Twisted reactor ({0}).".format(e))
            else:
                if verbose:
                    print("Running on Linux and optimal reactor (epoll) was installed.")
        else:
            if verbose:
                print("Running on Linux and optimal reactor (epoll) already installed.")
    else:
        # Unknown platform: fall back to Twisted's default reactor.
        try:
            from twisted.internet import default as defaultreactor
            defaultreactor.install()
        except Exception as e:
            print("WARNING: Could not install default Twisted reactor for this platform ({0}).".format(e))
def _get_client():
    '''Return a cloud client'''
    # __opts__ and __pillar__ are presumably injected by the Salt loader at
    # runtime -- TODO confirm against the enclosing module.
    client = salt.cloud.CloudClient(
        # Cloud config lives in a 'cloud' file next to the master config.
        os.path.join(os.path.dirname(__opts__['conf_file']), 'cloud'),
        # Deep-copy the pillar 'cloud' mapping so the shared pillar data
        # cannot be mutated by the cloud client.
        pillars=copy.deepcopy(__pillar__.get('cloud', {})))
    return client
def build_pages(self):
    """Walk ``pages_dir`` and build every page found in it."""
    for root, _, filenames in os.walk(self.pages_dir):
        rel_dir = root.replace(self.pages_dir, "").lstrip("/")
        # Directories whose (relative) name starts with "_" are skipped.
        if rel_dir.startswith("_"):
            continue
        for filename in filenames:
            self._build_page(os.path.join(rel_dir, filename))
def read(self):
    """Read the target value.

    Uses the $project aggregate operator in order to support nested
    objects; returns None when the document or the path is missing.
    """
    pipeline = [
        {'$match': {'_id': self._document_id}},
        {'$project': {'_value': '$' + self._path, '_id': False}},
    ]
    for doc in self.get_collection().aggregate(pipeline):
        if '_value' not in doc:
            break
        return doc['_value']
def add_scan_host_detail(self, scan_id, host='', name='', value=''):
    """Record a host-detail result on the scan identified by ``scan_id``."""
    self.scan_collection.add_result(
        scan_id, ResultType.HOST_DETAIL, host, name, value
    )
def search_url(self, var=DEFAULT_SEARCH_ENV, default=NOTSET, engine=None):
    """Returns a config dictionary, defaulting to SEARCH_URL.

    :rtype: dict
    """
    raw_url = self.url(var, default=default)
    return self.search_url_config(raw_url, engine=engine)
def stop(self):
    """Stop the client.

    This sends a signal to the client's main task which makes it
    terminate.  It may take some cycles through the event loop for the
    task to actually stop; query :attr:`running` to check.
    """
    if self.running:
        self.logger.debug("stopping main task of %r", self, stack_info=True)
        self._main_task.cancel()
def diff_table(self, table1, table2):
    """Returns the difference between the tables table1 and table2.

    :type table1: Table
    :type table2: Table
    :rtype: TableDiff
    :returns: a TableDiff describing the column changes, or ``False`` when
        the tables have no differences (callers must check for False).
    """
    changes = 0
    table_differences = TableDiff(table1.get_name())
    table_differences.from_table = table1
    table1_columns = table1.get_columns()
    table2_columns = table2.get_columns()
    # Columns present in table2 but not in table1 were added.
    for column_name, column in table2_columns.items():
        if not table1.has_column(column_name):
            table_differences.added_columns[column_name] = column
            changes += 1
    for column_name, column in table1_columns.items():
        # Columns missing from table2 were removed.
        if not table2.has_column(column_name):
            table_differences.removed_columns[column_name] = column
            changes += 1
            continue
        # Otherwise compare the column's properties in both tables.
        changed_properties = self.diff_column(column, table2.get_column(column_name))
        if changed_properties:
            column_diff = ColumnDiff(
                column.get_name(), table2.get_column(column_name),
                changed_properties)
            column_diff.from_column = column
            table_differences.changed_columns[column.get_name()] = column_diff
            changes += 1
    # Collapse matching add/remove pairs into renames.
    self.detect_column_renamings(table_differences)
    # TODO: index and foreign-key diffing (added/removed/changed/renamed
    # indexes and foreign-key constraints) is not implemented yet; a
    # commented-out draft was removed from this body.
    if changes:
        return table_differences
    return False
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # Coefficients for the requested intensity measure type, plus the
    # PGA coefficients needed to compute the rock-PGA term used by the
    # site response model.
    coeffs = self.COEFFS[imt]
    coeffs_pga = self.COEFFS[PGA()]
    # PGV has no spectral period; treat it as period 0 for site scaling.
    period = 0 if imt.name == 'PGV' else imt.period
    rock_pga = self._get_pga_on_rock(coeffs_pga, rup, dists)
    # Mean is the sum of the magnitude, path and site scaling terms.
    mean = (
        self._get_magnitude_scaling_term(coeffs, rup)
        + self._get_path_scaling(coeffs, dists, rup.mag)
        + self._get_site_scaling(coeffs, rock_pga, sites, period, dists.rjb)
    )
    stddevs = self._get_stddevs(coeffs, rup, dists, sites, stddev_types)
    return mean, stddevs
def _GetAPFSVolumeIdentifiers(self, scan_node):
    """Determines the APFS volume identifiers.

    Args:
        scan_node (SourceScanNode): scan node.

    Returns:
        list[str]: APFS volume identifiers; empty when none were found.

    Raises:
        ScannerError: if the format of or within the source is not
            supported or the scan node is invalid.
        UserAbort: if the user requested to abort.
    """
    if not scan_node or not scan_node.path_spec:
        raise errors.ScannerError('Invalid scan node.')

    apfs_system = apfs_volume_system.APFSVolumeSystem()
    apfs_system.Open(scan_node.path_spec)

    identifiers = self._source_scanner.GetVolumeIdentifiers(apfs_system)
    if not identifiers:
        return []

    # More than one volume: a mediator must decide which ones to use.
    if len(identifiers) > 1:
        if not self._mediator:
            raise errors.ScannerError(
                'Unable to proceed. APFS volumes found but no mediator to '
                'determine how they should be used.')
        try:
            identifiers = self._mediator.GetAPFSVolumeIdentifiers(
                apfs_system, identifiers)
        except KeyboardInterrupt:
            raise errors.UserAbort('File system scan aborted.')

    return self._NormalizedVolumeIdentifiers(
        apfs_system, identifiers, prefix='apfs')
def import_from_path(path):
    """Import a class dynamically, given it's dotted path.

    :param path: the path of the module
    :type path: string
    :return: Return the value of the named attribute of object.
    :rtype: object
    """
    module_name, class_name = path.rsplit('.', 1)
    # fromlist makes __import__ return the leaf module, not the package root.
    module = __import__(module_name, fromlist=[class_name])
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Unable to import %s' % path)
def from_otgformat(self, otgOrder):
    """Populate this order from an OTG-format order dict and return self.

    Maps the OTG fields onto this object's attributes:
    ``order_id``/``user_id``/``exchange_id``/``instrument_id`` become
    ``order_id``/``account_cookie``/``exchange_id``/``code``;
    ``volume_orign`` and ``volume_left`` yield ``amount`` and
    ``trade_amount``; ``insert_date_time`` (nanoseconds since the Unix
    epoch, Beijing time; 0 means "not set") becomes ``datetime``.

    :param otgOrder: order dict in OTG format, e.g.::

        {'seqno': 6,
         'user_id': '106184',
         'order_id': 'WDRB_QA01_FtNlyBem',
         'exchange_id': 'SHFE',
         'instrument_id': 'rb1905',
         'direction': 'SELL',
         'offset': 'OPEN',
         'volume_orign': 50,         # total ordered volume
         'price_type': 'LIMIT',      # ANY = market, LIMIT = limit price
         'limit_price': 3432.0,      # only meaningful when price_type == LIMIT
         'time_condition': 'GFD',    # IOC/GFS/GFD/GTC/GFA
         'volume_condition': 'ANY',  # ANY/MIN/ALL
         'insert_date_time': 15456564600000,
         'exchange_order_id': '3738',
         'status': 'FINISHED',       # ALIVE = active, FINISHED = done
         'volume_left': 0,
         'last_msg': '...'}          # status message from the exchange

    :return: self, for call chaining.
    """
    self.order_id = otgOrder.get('order_id')
    self.account_cookie = otgOrder.get('user_id')
    self.exchange_id = otgOrder.get('exchange_id')
    self.code = str(otgOrder.get('instrument_id')).upper()
    self.offset = otgOrder.get('offset')
    self.direction = otgOrder.get('direction')
    # Look the direction/offset combination up on the enum directly
    # instead of eval()-ing a string built from external order data.
    self.towards = getattr(
        ORDER_DIRECTION, '{}_{}'.format(self.direction, self.offset))
    self.amount = otgOrder.get('volume_orign')
    self.trade_amount = self.amount - otgOrder.get('volume_left')
    self.price = otgOrder.get('limit_price')
    self.order_model = getattr(ORDER_MODEL, otgOrder.get('price_type'))
    self.time_condition = otgOrder.get('time_condition')
    if otgOrder.get('insert_date_time') == 0:
        # 0 marks "no timestamp"; keep it as-is rather than converting.
        self.datetime = 0
    else:
        self.datetime = QA_util_stamp2datetime(
            int(otgOrder.get('insert_date_time')))
    self.sending_time = self.datetime
    self.volume_condition = otgOrder.get('volume_condition')
    self.message = otgOrder.get('last_msg')
    self._status = ORDER_STATUS.NEW
    # Messages containing "cancelled", "rejected" or "insufficient
    # position" (the latter usually from closing today/yesterday
    # positions that are not held) mark the order as failed.
    if '已撤单' in self.message or '拒绝' in self.message or '仓位不足' in self.message:
        self._status = ORDER_STATUS.FAILED
    self.realorder_id = otgOrder.get('exchange_order_id')
    return self
def v1_tag_suggest(request, tags, prefix, parent=''):
    '''Provide fast suggestions for tag components.

    This yields suggestions for *components* of a tag and a given
    prefix. For example, given the tags ``foo/bar/baz`` and
    ``fob/bob``, here are some example completions (ordering may be
    different):

    .. code-block:: text

        /dossier/v1/tags/suggest/prefix/f              => ['foo', 'fob']
        /dossier/v1/tags/suggest/prefix/foo            => ['foo']
        /dossier/v1/tags/suggest/prefix/b/parent/foo   => ['bar']
        /dossier/v1/tags/suggest/prefix/b/parent/fob   => ['bob']
        /dossier/v1/tags/suggest/prefix/b/parent/foo/bar => ['baz']

    Each list is wrapped in a JSON envelope for the response:
    ``{'suggestions': ['foo', 'fob']}``.

    An optional query parameter, ``limit``, may be passed to control
    the number of suggestions returned (capped at 10000).
    '''
    cleaned_prefix = prefix.decode('utf-8').strip()
    cleaned_parent = parent.decode('utf-8').strip()
    # Cap the requested limit so a client cannot ask for unbounded work.
    requested = int(request.params.get('limit', 100))
    limit = min(10000, requested)
    suggestions = tags.suggest(cleaned_parent, cleaned_prefix, limit=limit)
    return {'suggestions': suggestions}
def on_lstCategories_itemSelectionChanged(self):
    """Update purpose description label.

    .. note:: This is an automatic Qt slot
        executed when the purpose selection changes.
    """
    self.clear_further_steps()
    purpose = self.selected_purpose()
    if not purpose:
        # Nothing selected: leave the description widgets untouched.
        return
    # Show the description and matching icon for the selected purpose.
    self.lblDescribeCategory.setText(purpose["description"])
    icon_path = resources_path(
        'img', 'wizard',
        'keyword-category-%s.svg' % (purpose['key'] or 'notset'))
    self.lblIconCategory.setPixmap(QPixmap(icon_path))
    # A valid selection unlocks the next wizard step.
    self.parent.pbnNext.setEnabled(True)
def python_value(self, dtype, dvalue):
    """Convert a CLIPS type into Python."""
    # Fast path: a registered converter handles most primitive types.
    try:
        return CONVERTERS[dtype](dvalue)
    except KeyError:
        pass
    # Complex types need environment-aware wrappers.
    if dtype == clips.common.CLIPSType.MULTIFIELD:
        # NOTE(review): multifield_to_list() is called without dvalue —
        # presumably it reads pending multifield state; confirm.
        return self.multifield_to_list()
    elif dtype == clips.common.CLIPSType.FACT_ADDRESS:
        return clips.facts.new_fact(self._env, lib.to_pointer(dvalue))
    elif dtype == clips.common.CLIPSType.INSTANCE_ADDRESS:
        return clips.classes.Instance(self._env, lib.to_pointer(dvalue))
    return None
def payload_unregister(klass, pid):
    """is used while a hook is running to let Juju know
    that a payload has been manually stopped. The <class> and <id> provided
    must match a payload that has been previously registered with juju using
    payload-register.
    """
    # Invoke the Juju hook tool directly with the class and id.
    subprocess.check_call(['payload-unregister', klass, pid])
def fallback(message: str, ex: Exception) -> None:
    """Fallback procedure when a cli command fails.

    Logs the message and the exception traceback, then terminates the
    process with exit status 1.

    :param message: message to be logged
    :param ex: Exception which caused the failure
    """
    logging.error('%s', message)
    logging.exception('%s', ex)
    # Equivalent to sys.exit(1): signal failure to the shell.
    raise SystemExit(1)
def appliance_node_information(self):
    """Gets the ApplianceNodeInformation API client.

    Returns:
        ApplianceNodeInformation:
    """
    # Build the client lazily on first access and cache it afterwards.
    client = self.__appliance_node_information
    if not client:
        client = ApplianceNodeInformation(self.__connection)
        self.__appliance_node_information = client
    return client
def encoding(encoding=True):
    """DEPRECATED: use pynvim.decode()."""
    # Legacy callers passed an encoding name; any string now means True.
    flag = True if isinstance(encoding, str) else encoding

    def decorator(f):
        f._nvim_decode = flag
        return f

    return decorator
def from_api_repr(cls, resource, config):
    """Factory: construct a Variable given its API representation

    :type resource: dict
    :param resource: change set representation returned from the API.

    :type config: :class:`google.cloud.runtimeconfig.config.Config`
    :param config: The config to which this variable belongs.

    :rtype: :class:`google.cloud.runtimeconfig.variable.Variable`
    :returns: Variable parsed from ``resource``.
    """
    # The API returns the fully-qualified name; keep only the short name.
    variable = cls(
        name=variable_name_from_full_name(resource.get("name")),
        config=config,
    )
    variable._set_properties(resource=resource)
    return variable
def get_geometry(self, geo_level, geo_code):
    """Get the geometry description for a geography. This is a dict
    with two keys, 'properties' which is a dict of properties,
    and 'shape' which is a shapely shape (may be None).
    """
    mapit_level = SETTINGS['level_codes'][geo_level]
    # Assemble the query string piece by piece, then build the URL once.
    query = [
        'type=%s' % mapit_level,
        'generation=%s' % SETTINGS['generation'],
    ]
    simplify = SETTINGS['level_simplify'].get(mapit_level)
    if simplify:
        query.append('simplification_level=%s' % simplify)
    url = '%s/area/MDB:%s/feature.geojson?%s' % (
        SETTINGS['url'], geo_code, '&'.join(query))

    resp = requests.get(url)
    if resp.status_code == 404:
        # Unknown geography: signal "no geometry" rather than raising.
        return None
    resp.raise_for_status()

    feature = resp.json()
    return {
        'properties': feature['properties'],
        'shape': asShape(feature['geometry']),
    }
def expr_to_tree(ind, pset):
    """Convert the unstructured DEAP pipeline into a tree data-structure.

    Parameters
    ----------
    ind: deap.creator.Individual
        The pipeline that is being exported

    Returns
    -------
    pipeline_tree: list
        List of operators in the current optimized pipeline

    EXAMPLE:
        pipeline:
            "DecisionTreeClassifier(input_matrix, 28.0)"
        pipeline_tree:
            ['DecisionTreeClassifier', 'input_matrix', 28.0]
    """
    def node_to_tree(node, children):
        # Terminals resolve through the primitive set context when
        # possible, falling back to their literal value.
        if isinstance(node, deap.gp.Terminal):
            if node.name in pset.context:
                return pset.context[node.name]
            return node.value
        return [node.name] + children

    result = []
    pending = []
    # Shift-reduce over the prefix-ordered individual: push each node,
    # then reduce whenever the top node has collected all its arguments.
    for node in ind:
        pending.append((node, []))
        while len(pending[-1][1]) == pending[-1][0].arity:
            done_node, done_children = pending.pop()
            result = node_to_tree(done_node, done_children)
            if not pending:
                # Empty stack: every node has been consumed.
                break
            pending[-1][1].append(result)
    return result
def find_mrms_tracks(self):
    """Identify objects from MRMS timesteps and link them together with object matching.

    Returns:
        List of STObjects containing MESH track information.
    """
    obs_objects = []
    tracked_obs_objects = []
    if self.mrms_ew is not None:
        self.mrms_grid.load_data()
        # A full day of hourly grids is required; bail out otherwise.
        if len(self.mrms_grid.data) != len(self.hours):
            print('Less than 24 hours of observation data found')
            return tracked_obs_objects
        # Pass 1: segment candidate objects independently at every hour.
        for h, hour in enumerate(self.hours):
            # Work on a copy so the raw grid data is left untouched.
            mrms_data = np.zeros(self.mrms_grid.data[h].shape)
            mrms_data[:] = np.array(self.mrms_grid.data[h])
            # Negative values mark invalid/missing data; clamp to zero.
            mrms_data[mrms_data < 0] = 0
            # Smooth, label, and size-filter the field to find objects.
            hour_labels = self.mrms_ew.size_filter(
                self.mrms_ew.label(gaussian_filter(mrms_data, self.gaussian_window)),
                self.size_filter)
            # Drop labeled pixels below the detection threshold.
            hour_labels[mrms_data < self.mrms_ew.min_thresh] = 0
            obj_slices = find_objects(hour_labels)
            num_slices = len(obj_slices)
            obs_objects.append([])
            if num_slices > 0:
                for sl in obj_slices:
                    # Wrap each labeled region in an STObject carrying its
                    # data patch, binary mask, and grid coordinates.
                    obs_objects[-1].append(STObject(mrms_data[sl],
                                                    np.where(hour_labels[sl] > 0, 1, 0),
                                                    self.model_grid.x[sl],
                                                    self.model_grid.y[sl],
                                                    self.model_grid.i[sl],
                                                    self.model_grid.j[sl],
                                                    hour,
                                                    hour,
                                                    dx=self.model_grid.dx))
                    if h > 0:
                        # Estimate motion against the previous hour's grid.
                        dims = obs_objects[-1][-1].timesteps[0].shape
                        obs_objects[-1][-1].estimate_motion(hour, self.mrms_grid.data[h - 1], dims[1], dims[0])
        # Pass 2: link each hour's objects to tracks that ended the hour before.
        for h, hour in enumerate(self.hours):
            past_time_objs = []
            for obj in tracked_obs_objects:
                # Only tracks ending exactly one hour earlier can continue.
                if obj.end_time == hour - 1:
                    past_time_objs.append(obj)
            if len(past_time_objs) == 0:
                # No candidate tracks: every current object starts a new one.
                tracked_obs_objects.extend(obs_objects[h])
            elif len(past_time_objs) > 0 and len(obs_objects[h]) > 0:
                assignments = self.object_matcher.match_objects(past_time_objs, obs_objects[h], hour - 1, hour)
                unpaired = list(range(len(obs_objects[h])))
                for pair in assignments:
                    # Matched objects extend the existing track.
                    past_time_objs[pair[0]].extend(obs_objects[h][pair[1]])
                    unpaired.remove(pair[1])
                if len(unpaired) > 0:
                    # Objects left unmatched begin new tracks.
                    for up in unpaired:
                        tracked_obs_objects.append(obs_objects[h][up])
            print("Tracked Obs Objects: {0:03d} Hour: {1:02d}".format(len(tracked_obs_objects), hour))
    return tracked_obs_objects
def sharey(axes):
    """Shared axes limits without shared locators, ticks, etc.
    By Joe Kington"""
    # One Linker is shared by all axes; each axis keeps a reference so
    # it stays alive as long as any axis does.
    shared_linker = Linker(axes)
    for axis in axes:
        axis._linker = shared_linker
def _mems_updated_cb(self):
    """Called when the memories have been identified"""
    logger.info('Memories finished updating')
    # Memories are ready; now refresh the parameter table of contents,
    # reusing the cached TOC where possible.
    self.param.refresh_toc(self._param_toc_updated_cb, self._toc_cache)
def get_readme():
    """Get the contents of the ``README.md`` file as a Unicode string.

    Tries to convert the Markdown source to reStructuredText with
    pypandoc; falls back to the raw Markdown text when pypandoc (or the
    pandoc binary it needs) is unavailable.

    :return: the README contents.
    :rtype: str
    """
    try:
        import pypandoc
        description = pypandoc.convert('README.md', 'rst')
    except (IOError, ImportError):
        # Fall back to the raw Markdown. Use a context manager so the
        # file handle is closed promptly instead of leaking until GC.
        with open('README.md') as readme_file:
            description = readme_file.read()
    return description
def kill(self, sig=signal.SIGTERM):
    """Terminate the test job.

    Kill the subprocess if it was spawned, abort the spawning
    process otherwise. This information can be collected afterwards
    by reading the self.killed and self.spawned flags.
    Also join the 3 related threads to the caller thread. This can
    be safely called from any thread.
    This method behaves as self.join() when the thread isn't alive,
    i.e., it doesn't raise an exception.

    The ``sig`` parameter should be either:

    * ``signal.SIGKILL`` (``9``), on Linux or OSX;
    * ``signal.SIGTERM`` (``15``), the default value. On Windows
      this one calls the ``TerminateProcess`` Win32 API function.
    """
    # Poll until the worker thread dies on its own or we manage to
    # signal the spawned subprocess.
    while self.is_alive():
        # Tell the worker thread an abort was requested.
        self.killed = True
        time.sleep(POLLING_DELAY)
        # "Was a process spawned?" polling
        if not self.spawned:
            # Nothing to signal yet; keep polling.
            continue
        # Either self.run returns or runner yields
        if self.process.poll() is None:  # It's running
            self.process.send_signal(sig)
            try:  # There's no os.WNOHANG in Windows
                # Reap the child so it doesn't linger as a zombie
                # (the getattr fallback keeps this line importable on
                # platforms without os.WNOHANG).
                os.waitpid(self.process.pid, getattr(os, "WNOHANG", 1))
            except OSError:  # Ignore "No child processes" error
                pass
        break
    # We already either killed or finished it
    self.join()
def clean(self):
    """Validate that at least one row has been checked for selection."""
    if any(self.errors):
        # Don't bother validating the formset unless each form is
        # valid on its own.
        return
    query_parts = self.get_query_parts()
    selects, aliases, froms, wheres, sorts, groups_by, params = query_parts
    if not selects:
        raise forms.ValidationError(
            _(u"At least you must check a row to get."))
    # Stash the validated parts for later query construction.
    self._selects = selects
    self._aliases = aliases
    self._froms = froms
    self._wheres = wheres
    self._sorts = sorts
    self._groups_by = groups_by
    self._params = params
def reflectance(self, band):
    """Calculate top of atmosphere reflectance of Landsat 8
    as outlined here: http://landsat.usgs.gov/Landsat8_Using_Product.php

    R_raw = MR * Q + AR

    R = R_raw / cos(Z) = R_raw / sin(E)

    Z = 90 - E (in degrees)

    where:

        R_raw = TOA planetary reflectance, without correction for solar angle.
        R = TOA reflectance with a correction for the sun angle.
        MR = Band-specific multiplicative rescaling factor from the metadata
            (REFLECTANCE_MULT_BAND_x, where x is the band number)
        AR = Band-specific additive rescaling factor from the metadata
            (REFLECTANCE_ADD_BAND_x, where x is the band number)
        Q = Quantized and calibrated standard product pixel values (DN)
        E = Local sun elevation angle. The scene center sun elevation angle
            in degrees is provided in the metadata (SUN_ELEVATION).
        Z = Local solar zenith angle (same angle as E, but measured from the
            zenith instead of from the horizon).

    Returns
        ndarray:
        float32 ndarray with shape == input shape
    """
    if band not in self.oli_bands:
        raise ValueError(
            'Landsat 8 reflectance requires an OLI band (i.e. bands 1-8)')
    elev = getattr(self, 'sun_elevation')
    # Validate the sun elevation before reading the (potentially large)
    # band data, so invalid scenes fail fast.
    if elev < 0.0:
        raise ValueError("Sun elevation must be non-negative "
                         "(sun must be above horizon for entire scene)")
    dn = self._get_band('b{}'.format(band))
    mr = getattr(self, 'reflectance_mult_band_{}'.format(band))
    ar = getattr(self, 'reflectance_add_band_{}'.format(band))
    # Rescale DN to raw reflectance, then correct for the sun angle.
    return ((mr * dn.astype(float32)) + ar) / sin(deg2rad(elev))
def main():
    '''Main routine.

    Attaches or detaches a data disk on a VM scale set VM, using Azure
    credentials loaded from azurermconfig.json in the current folder.
    Exits the process with an error message on invalid arguments,
    missing config, or a failed PUT.
    '''
    # validate command line arguments
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--vmssname', '-n', required=True, action='store', help='Scale set name')
    arg_parser.add_argument('--rgname', '-g', required=True, action='store', help='Resource Group Name')
    arg_parser.add_argument('--operation', '-o', required=True, action='store', help='Operation (attach/detach)')
    arg_parser.add_argument('--vmid', '-i', required=True, action='store', help='VM id')
    arg_parser.add_argument('--lun', '-l', required=True, action='store', help='lun id')
    # NOTE(review): the help text "Optional password" looks copy-pasted;
    # it should describe the disk name — confirm before changing.
    arg_parser.add_argument('--diskname', '-d', required=False, action='store', help='Optional password')
    args = arg_parser.parse_args()
    vmssname = args.vmssname
    rgname = args.rgname
    operation = args.operation
    vmid = args.vmid
    lun = int(args.lun)
    diskname = args.diskname
    if operation != 'attach' and operation != 'detach':
        sys.exit('--operation must be attach or detach')
    if diskname is None and operation == 'attach':
        sys.exit('--diskname is required for attach operation.')
    # Load Azure app defaults
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        sys.exit("Error: Expecting azurermconfig.json in current folder")
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']
    # authenticate
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    # do a get on the VM to fetch its current model
    vmssvm_model = azurerm.get_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid)
    # check operation and build the updated model
    if operation == 'attach':
        new_model = attach_model(subscription_id, rgname, vmssvm_model, diskname, lun)
    else:
        if operation == 'detach':
            new_model = detach_model(vmssvm_model, lun)
    # do a put on the VM with the modified model
    rmreturn = azurerm.put_vmss_vm(access_token, subscription_id, rgname, vmssname, vmid, new_model)
    if rmreturn.status_code != 201 and rmreturn.status_code != 202:
        sys.exit('Error ' + str(rmreturn.status_code) + ' creating VM. ' + rmreturn.text)
    # NOTE(review): rmreturn exposes status_code/text, so it looks like an
    # HTTP response object; json.dumps() on it would raise TypeError unless
    # azurerm.put_vmss_vm returns plain JSON-serializable data — confirm.
    print(json.dumps(rmreturn, sort_keys=False, indent=2, separators=(',', ': ')))
def set_bsd_socket_params(self, port_reuse=None):
    """Sets BSD-sockets related params.

    :param bool port_reuse: Enable REUSE_PORT flag on socket to allow multiple
        instances binding on the same address (BSD only).

    :return: the owning section object, enabling call chaining.
    """
    self._set('reuse-port', port_reuse, cast=bool)
    return self._section
def newFromSites(self, sites=None, exclude=False):
    """Create a new read from self, with only certain sites.

    @param sites: A set of C{int} 0-based sites (i.e., indices) in
        sequences that should be kept. If C{None} (the default), all sites
        are kept.
    @param exclude: If C{True} the C{sites} will be excluded, not
        included.
    @return: A new instance of this class containing only the requested
        sites of the sequence and structure.
    """
    if sites is None:
        # The documented default: keep every site. Previously the
        # parameter had no default and a None value raised TypeError.
        sites = set(range(len(self)))
    if exclude:
        sites = set(range(len(self))) - sites
    newSequence = []
    newStructure = []
    # Keep sequence and structure characters in lockstep.
    for index, (base, structure) in enumerate(
            zip(self.sequence, self.structure)):
        if index in sites:
            newSequence.append(base)
            newStructure.append(structure)
    return self.__class__(
        self.id, ''.join(newSequence), ''.join(newStructure))
def patch_file(patch_stream: TextIO, fromcsv_stream: TextIO, tocsv_stream: TextIO, strict: bool = True, sep: str = ','):
    """Apply the patch to the source CSV file, and save the result to the
    target file.
    """
    diff = patch.load(patch_stream)
    source_records = records.load(fromcsv_stream, sep=sep)
    patched = patch.apply(diff, source_records, strict=strict)
    # Decide on a column ordering for the output.
    if patched:
        # Have data: put the index columns first for a nicer ordering.
        fieldnames = _nice_fieldnames(patched[0].keys(), diff['_index'])
    else:
        # No data: preserve the original column order.
        fieldnames = source_records.fieldnames
    records.save(patched, fieldnames, tocsv_stream)
def find_state(self, container, start=None, end=None, avoid=None, initial_state=None, final_state=None):
    """Execute instructions.

    Runs the instructions in ``container`` from ``start`` to ``end``,
    skipping the addresses in ``avoid``, and collects the traces that
    reach ``final_state``.

    :param container: REIL instruction container to execute.
    :param start: optional native start address.
    :param end: optional native end address.
    :param avoid: optional list of native addresses to avoid.
    :param initial_state: CPU state to start execution from.
    :param final_state: target state terminating a trace.
    :return: the final traces collected across all explored paths.
    """
    self.__set_cpu_state(initial_state)
    # Convert input native addresses to reil addresses.
    start = to_reil_address(start) if start else None
    end = to_reil_address(end) if end else None
    avoid = [to_reil_address(addr) for addr in avoid] if avoid else []
    # Load instruction pointer: explicit start, or the first instruction.
    ip = start if start else container[0].address
    execution_state = Queue()
    trace_current = []
    trace_final = []
    self.__fs_process_container(container, final_state, ip, end, avoid, initial_state, execution_state, trace_current, trace_final)
    # Only returns when all paths have been visited.
    assert execution_state.empty()
    return trace_final
def renew(cls, fqdn, duration, background):
    """Renew a domain."""
    fqdn = fqdn.lower()
    # Force background mode when not attached to a terminal.
    if not background and not cls.intty():
        background = True
    current_year = cls.info(fqdn)['date_registry_end'].year
    result = cls.call(
        'domain.renew',
        fqdn,
        {'duration': duration, 'current_year': current_year},
    )
    if background:
        return result
    # interactive mode, run a progress bar
    cls.echo('Renewing your domain.')
    cls.display_progress(result)
    cls.echo('Your domain %s has been renewed.' % fqdn)
def process_match(match, fixed_text, cur, cur_end):
    """Processes a single match in rules

    Returns True when the rule's context condition holds at the current
    position (i.e. the replacement should be applied), False otherwise.

    :param match: rule dict with 'type' ('prefix'/'suffix'), 'scope'
        (optionally '!'-prefixed for negation) and, for exact scopes,
        'value'.
    :param fixed_text: the full text being processed.
    :param cur: cursor position at the start of the matched pattern.
    :param cur_end: position just past the matched pattern.
    """
    # Set our tools
    # -- Initial/default value for replace
    replace = True
    # -- Set check cursor depending on match['type']
    if match['type'] == 'prefix':
        chk = cur - 1
    else:
        # suffix
        chk = cur_end
    # -- Set scope based on whether scope is negative
    if match['scope'].startswith('!'):
        scope = match['scope'][1:]
        negative = True
    else:
        scope = match['scope']
        negative = False
    # Let the matching begin
    # -- Punctuations
    if scope == 'punctuation':
        # Conditions: out-of-bounds cursor counts as punctuation;
        # the whole check is XORd with negative.
        if (not ((chk < 0 and match['type'] == 'prefix') or
                 (chk >= len(fixed_text) and match['type'] == 'suffix') or
                 validate.is_punctuation(fixed_text[chk])) ^ negative):
            replace = False
    # -- Vowels -- Checks: 1. Cursor should not be at first character
    # -- if prefix or last character if suffix, 2. Character at chk
    # -- should be a vowel. 3. 'negative' will invert the value of 1
    # -- AND 2
    elif scope == 'vowel':
        if (not (((chk >= 0 and match['type'] == 'prefix') or
                  (chk < len(fixed_text) and match['type'] == 'suffix')) and
                 validate.is_vowel(fixed_text[chk])) ^ negative):
            replace = False
    # -- Consonants -- Checks: 1. Cursor should not be at first
    # -- character if prefix or last character if suffix, 2. Character
    # -- at chk should be a consonant. 3. 'negative' will invert the
    # -- value of 1 AND 2
    elif scope == 'consonant':
        if (not (((chk >= 0 and match['type'] == 'prefix') or
                  (chk < len(fixed_text) and match['type'] == 'suffix')) and
                 validate.is_consonant(fixed_text[chk])) ^ negative):
            replace = False
    # -- Exacts
    elif scope == 'exact':
        # Prepare cursor for exact search
        if match['type'] == 'prefix':
            exact_start = cur - len(match['value'])
            exact_end = cur
        else:
            # suffix
            exact_start = cur_end
            exact_end = cur_end + len(match['value'])
        # Validate exact find.
        if not validate.is_exact(match['value'], fixed_text, exact_start, exact_end, negative):
            replace = False
    # Return replace, which will be true if none of the checks above match
    return replace
def get_jwt_key_data():
    """Returns the data for the JWT private key used for encrypting the user login token as a string object

    Returns:
        `str`
    """
    global __jwt_data
    # Serve the module-level cache when the key was already loaded.
    if __jwt_data:
        return __jwt_data

    # Imported lazily to avoid a circular import at module load time.
    from cloud_inquisitor import config_path
    from cloud_inquisitor.config import dbconfig

    key_path = dbconfig.get('jwt_key_file_path', default='ssl/private.key')
    if not os.path.isabs(key_path):
        # Relative paths are resolved against the configuration folder.
        key_path = os.path.join(config_path, key_path)

    with open(key_path, 'r') as key_file:
        __jwt_data = key_file.read()
    return __jwt_data
def P(value, bits=None, endian=None, target=None):
    """Pack an unsigned pointer for a given target.

    Args:
        value(int): The value to pack.
        bits(:class:`~pwnypack.target.Target.Bits`): Override the default
            word size. If ``None`` it will look at the word size of
            ``target``.
        endian(:class:`~pwnypack.target.Target.Endian`): Override the default
            byte order. If ``None``, it will look at the byte order of
            the ``target`` argument.
        target(:class:`~pwnypack.target.Target`): Override the default byte
            order. If ``None``, it will look at the byte order of
            the global :data:`~pwnypack.target.target`.
    """
    # Dispatch to the width-specific packer (P8/P16/P32/P64) resolved
    # from the effective word size.
    packer = globals()['P%d' % _get_bits(bits, target)]
    return packer(value, endian=endian, target=target)
def simxGetObjectIntParameter(clientID, objectHandle, parameterID, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    # The C API writes the result through an out-parameter.
    out_value = ct.c_int()
    ret_code = c_GetObjectIntParameter(
        clientID, objectHandle, parameterID, ct.byref(out_value), operationMode)
    return ret_code, out_value.value
def get_source_data_items(self, data_item: DataItem) -> typing.List[DataItem]:
    """Return the list of data items that are data sources for the data item.

    :return: The list of :py:class:`nion.swift.Facade.DataItem` objects.

    .. versionadded:: 1.0

    Scriptable: Yes
    """
    if not data_item:
        # Preserve historical behavior: a falsy argument yields None.
        return None
    sources = self._document_model.get_source_data_items(data_item._data_item)
    return [DataItem(source) for source in sources]
def _master_tops(self, load):
    '''Return the results from an external node classifier if one is
    specified

    :param dict load: A payload received from a minion
    :return: The results from an external node classifier
    '''
    # The load must carry a minion id and a valid auth token.
    verified = self.__verify_load(load, ('id', 'tok'))
    if verified is False:
        return {}
    # Verification already happened here; skip it downstream.
    return self.masterapi._master_tops(verified, skip_verify=True)
def list_(device, unit=None):
    '''Prints partition information of given <device>

    Parses the machine-readable (-m) output of parted into a dict with
    an 'info' key (disk-level properties) and a 'partitions' key
    (one entry per partition, keyed by partition number).

    CLI Examples:

    .. code-block:: bash

        salt '*' partition.list /dev/sda
        salt '*' partition.list /dev/sda unit=s
        salt '*' partition.list /dev/sda unit=kB
    '''
    _validate_device(device)
    if unit:
        if unit not in VALID_UNITS:
            raise CommandExecutionError('Invalid unit passed to partition.part_list')
        cmd = 'parted -m -s {0} unit {1} print'.format(device, unit)
    else:
        cmd = 'parted -m -s {0} print'.format(device)
    out = __salt__['cmd.run_stdout'](cmd).splitlines()
    ret = {'info': {}, 'partitions': {}}
    # The first data line describes the disk; subsequent lines are
    # partitions. 'mode' tracks which section we are parsing.
    mode = 'info'
    for line in out:
        # Unit-type header lines emitted by parted; skip them.
        if line in ('BYT;', 'CHS;', 'CYL;'):
            continue
        cols = line.rstrip(';').split(':')
        if mode == 'info':
            if 7 <= len(cols) <= 8:
                ret['info'] = {
                    'disk': cols[0],
                    'size': cols[1],
                    'interface': cols[2],
                    'logical sector': cols[3],
                    'physical sector': cols[4],
                    'partition table': cols[5],
                    'model': cols[6]}
                if len(cols) == 8:
                    ret['info']['disk flags'] = cols[7]
                # Older parted (2.x) doesn't show disk flags in the 'print'
                # output, and will return a 7-column output for the info
                # line. In these cases we just leave this field out of the
                # return dict.
                mode = 'partitions'
            else:
                raise CommandExecutionError('Problem encountered while parsing output from parted')
        else:
            # Parted (v3.1) have a variable field list in machine
            # readable output:
            # number:start:end:[size:]([file system:name:flags;]|[free;])
            # * If units are in CHS 'size' is not printed.
            # * If is a logical partition with PED_PARTITION_FREESPACE
            #   set, the last three fields are replaced with the
            #   'free' text.
            fields = ['number', 'start', 'end']
            if unit != 'chs':
                fields.append('size')
            if cols[-1] == 'free':
                # Drop the last element from the list
                cols.pop()
            else:
                fields.extend(['file system', 'name', 'flags'])
            if len(fields) == len(cols):
                ret['partitions'][cols[0]] = dict(six.moves.zip(fields, cols))
            else:
                raise CommandExecutionError('Problem encountered while parsing output from parted')
    return ret
def specbits(self):
    """Returns the array of arguments that would be given to
    iptables for the current Extension."""
    args = []
    for opt in sorted(self.__options):
        # handle the case where this is a negated option
        negated = re.match(r'^! (.*)', opt)
        if negated:
            args += ['!', "--%s" % negated.group(1)]
        else:
            args += ["--%s" % opt]
        value = self.__options[opt]
        # List-valued options contribute each element as its own argument.
        if isinstance(value, list):
            args += value
        else:
            args += [value]
    return args
def create_name_id_mapping_response(self, name_id=None, encrypted_id=None, in_response_to=None, issuer=None, sign_response=False, status=None, sign_alg=None, digest_alg=None, **kwargs):
    """protocol for mapping a principal's name identifier into a
    different name identifier for the same principal.
    Done over soap.

    :param name_id:
    :param encrypted_id:
    :param in_response_to:
    :param issuer:
    :param sign_response:
    :param status:
    :return:
    """
    # This exchange always happens over SOAP.
    ms_args = self.message_args()
    _resp = NameIDMappingResponse(
        name_id, encrypted_id, in_response_to=in_response_to, **ms_args)
    if not sign_response:
        logger.info("Message: %s", _resp)
        return _resp
    return self.sign(_resp, sign_alg=sign_alg, digest_alg=digest_alg)
def search_results_info(self):
    """Returns the search results info for this command invocation or None.

    The search results info object is created from the search results info
    file associated with the command invocation. Splunk does not pass the
    location of this file by default. You must request it by specifying
    these configuration settings in commands.conf:

    .. code-block:: python

        enableheader = true
        requires_srinfo = true

    The :code:`enableheader` setting is :code:`true` by default. Hence, you
    need not set it. The :code:`requires_srinfo` setting is false by
    default. Hence, you must set it.

    :return: :class:`SearchResultsInfo`, if :code:`enableheader` and
        :code:`requires_srinfo` are both :code:`true`. Otherwise, if either
        :code:`enableheader` or :code:`requires_srinfo` are :code:`false`,
        a value of :code:`None` is returned.
    """
    # Return the cached value when the info file was already parsed.
    if self._search_results_info is not None:
        return self._search_results_info
    try:
        info_path = self.input_header['infoPath']
    except KeyError:
        # Splunk did not pass the info file location (requires_srinfo off).
        return None

    def convert_field(field):
        # Strip a leading underscore and make the name attribute-safe
        # so it can be used as a namedtuple field.
        return (field[1:] if field[0] == '_' else field).replace('.', '_')

    def convert_value(field, value):
        # Best-effort typing of the raw CSV cell values.
        if field == 'countMap':
            # Alternating key;value;key;value... pairs.
            split = value.split(';')
            value = dict((key, int(value)) for key, value in zip(split[0::2], split[1::2]))
        elif field == 'vix_families':
            value = ElementTree.fromstring(value)
        elif value == '':
            value = None
        else:
            try:
                value = float(value)
                if value.is_integer():
                    value = int(value)
            except ValueError:
                pass
        return value

    # NOTE(review): binary mode plus reader.next() is Python 2 csv usage;
    # Python 3 would need text mode and next(reader) — confirm the
    # supported interpreter before modernizing.
    with open(info_path, 'rb') as f:
        from collections import namedtuple
        import csv
        reader = csv.reader(f, dialect='splunklib.searchcommands')
        fields = [convert_field(x) for x in reader.next()]
        values = [convert_value(f, v) for f, v in zip(fields, reader.next())]

    search_results_info_type = namedtuple('SearchResultsInfo', fields)
    self._search_results_info = search_results_info_type._make(values)
    return self._search_results_info
def pipe_xpathfetchpage(context=None, _INPUT=None, conf=None, **kwargs):
    """A source that fetches the content of a given website as DOM nodes or
    a string.  Loopable.

    context : pipe2py.Context object
    _INPUT : pipeforever pipe or an iterable of items or fields
    conf : dict
        URL -- url object containing the URL to download
        xpath -- xpath to extract
        html5 -- use html5 parser?
        useAsString -- emit items as string?

    TODOS:
    - don't retrieve pages larger than 1.5MB
    - don't retrieve if page is not indexable.

    Yields
    ------
    _OUTPUT : items
    """
    conf = DotDict(conf)
    urls = utils.listize(conf['URL'])
    for item in _INPUT:
        for item_url in urls:
            url = utils.get_value(DotDict(item_url), DotDict(item), **kwargs)
            url = utils.get_abspath(url)
            # Fixed: the original read the response for verbose logging and
            # then handed the exhausted file object to the parser, which
            # produced an empty tree.  Read the document once and parse the
            # buffered bytes instead.
            f = urlopen(url)
            try:
                raw = f.read()
            finally:
                f.close()
            # TODO: it seems that Yahoo! converts relative links to
            # absolute.  This needs to be done on the content but seems to
            # be a non-trivial task in python.
            if context and context.verbose:
                print('............Content .................')
                print(raw.decode('utf-8'))
                print('...............EOF...................')
            xpath = conf.get('xpath', **kwargs)
            html5 = conf.get('html5', **kwargs) == 'true'
            use_as_string = conf.get('useAsString', **kwargs) == 'true'
            if html5:
                root = html5parser.document_fromstring(raw)
            else:
                root = html.document_fromstring(raw)
            items = root.xpath(xpath)
            if context and context.verbose:
                print('XPathFetchPage: found count items:', len(items))
            for etree in items:
                i = utils.etree_to_dict(etree)
                if context and context.verbose:
                    print('--------------item data --------------------')
                    print(i)
                    print('--------------EOF item data ----------------')
                if use_as_string:
                    yield {'content': str(i)}
                else:
                    yield i
        if item.get('forever'):
            # _INPUT is pipeforever and not a loop, so we just yield our
            # item once.
            break
def _input_as_list(self, data):
    """Take the positional arguments as input in a list.

    The list input here should be
    [query_file_path, database_file_path, output_file_path].
    """
    query, database, output = data
    if (not isabs(database)) or (not isabs(query)) or (not isabs(output)):
        raise ApplicationError("Only absolute paths allowed.\n%s"
                               % ', '.join(data))
    self._database = FilePath(database)
    self._query = FilePath(query)
    self._output = ResultPath(output, IsWritten=True)
    # Check the combination of database and query type.
    if (self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and
            (self.Parameters['-t'].Value, self.Parameters['-q'].Value)
            not in self._valid_combinations):
        # Fixed: the original read self.Paramters (typo), which would have
        # raised AttributeError instead of reporting the real problem.
        error_message = ("Invalid combination of database and query "
                         "types ('%s', '%s').\n"
                         % (self.Parameters['-t'].Value,
                            self.Parameters['-q'].Value))
        error_message += ("Must be one of: %s\n"
                          % repr(self._valid_combinations))
        raise ApplicationError(error_message)
    # The remaining checks all follow the same "flag value must be in an
    # allowed set" pattern; the original duplicated the code six times and
    # accumulated typos ('-repeat' for '-repeats', missing .Value for
    # '-mask' and '-out' in the messages) -- all fixed by the shared helper.
    self._check_allowed_value('-t', self._database_types, 'database type')
    self._check_allowed_value('-q', self._query_types, 'query type')
    self._check_allowed_value('-mask', self._mask_types, 'mask type')
    self._check_allowed_value('-qMask', self._mask_types, 'qMask type')
    self._check_allowed_value('-repeats', self._mask_types, 'repeat type')
    self._check_allowed_value('-out', self._out_types, 'output type')
    return ''

def _check_allowed_value(self, flag, allowed, label):
    """Raise ApplicationError when option ``flag`` is on and its value is
    not one of ``allowed``."""
    param = self.Parameters[flag]
    if param.isOn() and param.Value not in allowed:
        error_message = "Invalid %s %s\n" % (label, param.Value)
        error_message += "Allowed values: %s\n" % ', '.join(allowed)
        raise ApplicationError(error_message)
def process_iter(proc, cmd=""):
    """Iterate over a process's stdout and report errors when it finishes.

    Yields each line of ``proc.stdout``.  Once iteration stops, waits for
    the process and raises ProcessException for unexpected return codes.
    """
    try:
        for line in proc.stdout:
            yield line
    finally:
        if proc.poll() is None:
            # Still running: we got here via an exception in the consumer,
            # so do not block on wait().
            return
        proc.wait()
        acceptable = (0, None, signal.SIGPIPE, signal.SIGPIPE + 128)
        if proc.returncode not in acceptable:
            sys.stderr.write("cmd was:%s\n" % cmd)
            sys.stderr.write("return code was:%s\n" % proc.returncode)
            raise ProcessException(cmd)
def write_project_summary(samples, qsign_info=None):
    """Write project summary information on the provided samples.

    Dumps run date, qsignature info, upload directory, bcbio_system config
    and per-sample summaries into ``project-summary.yaml`` in the work dir.
    """
    first = samples[0][0]
    work_dir = first["dirs"]["work"]
    out_file = os.path.join(work_dir, "project-summary.yaml")
    if "dir" in first["upload"]:
        upload_dir = os.path.join(work_dir, first["upload"]["dir"])
    else:
        upload_dir = ""
    date = str(datetime.now())
    # Must read any previous summary *before* opening out_file for writing
    # (opening with "w" truncates it).
    prev_samples = _other_pipeline_samples(out_file, samples)
    with open(out_file, "w") as out_handle:
        def dump(doc):
            yaml.safe_dump(doc, out_handle, default_flow_style=False,
                           allow_unicode=False)
        dump({"date": date})
        if qsign_info:
            qsign_out = utils.deepish_copy(qsign_info[0])
            qsign_out.pop("out_dir", None)
            dump({"qsignature": qsign_out})
        dump({"upload": upload_dir})
        dump({"bcbio_system": first["config"].get("bcbio_system", "")})
        dump({"samples": prev_samples +
                         [_save_fields(sample[0]) for sample in samples]})
    return out_file
def __analyses_match(self, analysisA, analysisB):
    """Return True when the two analyses are duplicates, i.e. they carry
    exactly the same morphological information."""
    keys = (POSTAG, ROOT, FORM, CLITIC, ENDING)
    return all(
        key in analysisA and key in analysisB
        and analysisA[key] == analysisB[key]
        for key in keys
    )
def put(self, key, value, ttl=-1):
    """Transactional implementation of
    :func:`Map.put(key, value, ttl) <hazelcast.proxy.map.Map.put>`.

    The object to be put is accessible only in the current transaction
    context until the transaction is committed.

    :param key: (object), the specified key.
    :param value: (object), the value to associate with the key.
    :param ttl: (int), maximum time in seconds for this entry to stay (optional).
    :return: (object), previous value associated with key or ``None`` if
        there was no mapping for key.
    """
    check_not_none(key, "key can't be none")
    check_not_none(value, "value can't be none")
    key_data = self._to_data(key)
    value_data = self._to_data(value)
    return self._encode_invoke(transactional_map_put_codec, key=key_data,
                               value=value_data, ttl=to_millis(ttl))
def get_cmdline_options(self):
    """Return a '{cmd: {opt: val}}' map of all command-line options.

    Option names are all long, without the leading '--', and contain
    dashes rather than underscores.  If the option doesn't take an argument
    (e.g. '--quiet'), the 'val' is None.

    Note that options provided by config files are intentionally excluded.
    """
    result = {}
    for cmd, opts in self.command_options.items():
        for opt, (src, val) in opts.items():
            # Only keep options that actually came from the command line.
            if src != "command line":
                continue
            opt = opt.replace('_', '-')
            if val == 0:
                # A false boolean was produced by a negative alias
                # (e.g. '--no-xxx'); recover the alias name.
                cmdobj = self.get_command_obj(cmd)
                neg_opt = self.negative_opt.copy()
                neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
                for neg, pos in neg_opt.items():
                    if pos == opt:
                        opt, val = neg, None
                        break
                else:
                    raise AssertionError("Shouldn't be able to get here")
            elif val == 1:
                val = None
            result.setdefault(cmd, {})[opt] = val
    return result
def consume(self):  # pragma: no cover
    """Start consuming rabbitmq messages (blocks until interrupted)."""
    print(' [*] Waiting for logs. To exit press CTRL+C')
    channel = self.channel
    channel.basic_consume(self.queue_name, self.callback)
    channel.start_consuming()
def execute_input_middleware_stream(self, request, controller):
    """Run the request through the input middleware chain.

    ``request`` comes from the controller; ``controller`` selects which
    method of each middleware class is invoked.  Returns the original
    request and the (possibly transformed) final request.
    """
    start_request = request
    # Either 'http' or 'cmd' or 'irc'.
    controller_name = "".join(controller.get_controller_name().split('-')[:1])
    middlewares = list(self.pre_input_middleware) + list(self.input_middleware)
    for middleware_class in middlewares:
        handler = getattr(middleware_class(controller), controller_name)
        if not handler:
            continue
        result = handler(request)
        if GiottoControl in type(result).mro():
            # A middleware class returned a control object (redirection,
            # et al.); ignore all remaining middleware classes.
            return request, result
        request = result
    return start_request, request
def TaskAttemptInput(input, task_attempt):
    """Return the correct Input class for a given data type and gather
    mode."""
    data_type, mode = _get_input_info(input)
    if data_type != 'file':
        return NoOpInput(None, task_attempt)
    contents = input['data']['contents']
    if mode == 'no_gather':
        return FileInput(contents, task_attempt)
    # Anything other than 'no_gather' must be one of the gather modes.
    assert mode.startswith('gather')
    return FileListInput(contents, task_attempt)
def OnSecondaryCheckbox(self, event):
    """Top checkbox event handler: mirror the checkbox state onto the
    'top' and 'right' attributes, then redraw the chart."""
    checked = event.IsChecked()
    self.attrs["top"] = checked
    self.attrs["right"] = checked
    post_command_event(self, self.DrawChartMsg)
def save(self):
    """Save profile settings into the user profile directory."""
    config_path = self.profiledir + '/config'
    if not isdir(self.profiledir):
        makedirs(self.profiledir)
    parser = SafeConfigParser()
    parser.add_section('ssh')
    parser.set('ssh', 'private_key', self.ssh_private_key)
    parser.set('ssh', 'public_key', self.ssh_public_key)
    with open(config_path, 'w') as cfile:
        parser.write(cfile)
def get_ndv_b(b):
    """Get the NoData value for a GDAL band.

    If NoDataValue is not set on the band, probe the upper-left and
    lower-right pixels; when the upper-left is NaN or agrees with the
    lower-right, use it, otherwise assume 0.  A NaN NoDataValue is kept
    only for floating-point bands.

    Parameters
    ----------
    b : GDALRasterBand object
        This is the input band.

    Returns
    -------
    b_ndv : float
        NoData value.
    """
    b_ndv = b.GetNoDataValue()
    if b_ndv is None:
        # No explicit nodata: inspect corner pixels instead.
        ns, nl = b.XSize, b.YSize
        ul = float(b.ReadAsArray(0, 0, 1, 1))
        lr = float(b.ReadAsArray(ns - 1, nl - 1, 1, 1))
        # Probably better to use a 3/4-corner criterion.
        b_ndv = ul if (np.isnan(ul) or ul == lr) else 0
    elif np.isnan(b_ndv):
        # NaN nodata only makes sense for floating-point bands.
        dtype_name = gdal.GetDataTypeName(b.DataType)
        b_ndv = np.nan if 'Float' in dtype_name else 0
    return b_ndv
def write(self):
    """Set the output pins of the port to the correct state."""
    mask = 0
    base = self.port_number * 8
    for pin in self.pins:
        # Only pins configured as outputs that are currently high
        # contribute a bit to the port mask.
        if pin.mode == OUTPUT and pin.value == 1:
            mask |= 1 << int(pin.pin_number - base)
    # Firmata digital message: command byte, then the mask split into two
    # 7-bit payload bytes.
    msg = bytearray([DIGITAL_MESSAGE + self.port_number,
                     mask % 128, mask >> 7])
    self.board.sp.write(msg)
def _get_xmlparser(xmlclass=XmlObject, validate=False, resolver=None):
    """Initialize an :class:`lxml.etree.XMLParser` with appropriate
    settings for validation.

    If validation is requested and ``xmlclass`` defines XSD_SCHEMA, schema
    validation is used; otherwise DTD validation.  Pass ``resolver=None``
    to skip adding one.
    """
    if not validate:
        # Without validation, parsing should fail only for well-formedness
        # issues.  collect_ids must be off, otherwise lxml complains about
        # duplicate IDs; but ID uniqueness is a *validity* constraint, not
        # a well-formedness constraint (https://www.w3.org/TR/xml/#id).
        opts = {"collect_ids": False}
    elif hasattr(xmlclass, 'XSD_SCHEMA') and xmlclass.XSD_SCHEMA is not None:
        # Reuse the schema if it has already been loaded.  (We access the
        # private attribute because on the *class* 'xmlschema' is a
        # property object, not the initialized schema we actually want.)
        xmlschema = getattr(xmlclass, '_xmlschema', None)
        if xmlschema is None:
            xmlschema = loadSchema(xmlclass.XSD_SCHEMA)
        opts = {'schema': xmlschema}
    else:
        # Configured XmlObject has no schema defined: assume DTD
        # validation.
        opts = {'dtd_validation': True}
    parser = etree.XMLParser(**opts)
    if resolver is not None:
        parser.resolvers.add(resolver)
    return parser
def log_pipeline(self, pl):
    """Write a report of the pipeline out to a file under 'pipeline/'."""
    from datetime import datetime
    from ambry.etl.pipeline import CastColumns
    self.build_fs.makedir('pipeline', allow_recreate=True)
    try:
        ccp = pl[CastColumns]
        caster_code = ccp.pretty_code
    except Exception as e:
        # Best-effort: report the failure in place of the code.
        caster_code = str(e)
    templ = u("""
Pipeline: {}
run time: {}
phase: {}
source name: {}
source table: {}
dest table: {}
========================================================
{}
Pipeline Headers
================
{}
Caster Code
===========
{}
""")
    try:
        v = templ.format(pl.name, str(datetime.now()), pl.phase,
                         pl.source_name, pl.source_table, pl.dest_table,
                         unicode(pl), pl.headers_report(), caster_code)
    except UnicodeError:
        v = ''
        # Fixed typo in the log message ('Faled' -> 'Failed').
        self.error('Failed to write pipeline log for pipeline {} '
                   .format(pl.name))
    path = os.path.join('pipeline', pl.phase + '-' + pl.file_name + '.txt')
    self.build_fs.makedir(os.path.dirname(path), allow_recreate=True,
                          recursive=True)
    # LazyFS must be handled differently: its setcontents() does not accept
    # an 'encoding' keyword, so write through the wrapped filesystem.
    if isinstance(self.build_fs, LazyFS):
        self.build_fs.wrapped_fs.setcontents(path, v, encoding='utf8')
    else:
        self.build_fs.setcontents(path, v, encoding='utf8')
def dendrogram(df, method='average', filter=None, n=0, p=0, sort=None,
               orientation=None, figsize=None, fontsize=16, inline=False):
    """Fit a scipy hierarchical clustering algorithm to the DataFrame's
    nullity pattern and visualize the result as a scipy dendrogram.

    The default vertical display fits up to 50 columns.  If more than 50
    columns are specified and orientation is left unspecified, the
    dendrogram automatically swaps to a horizontal display.

    :param df: The DataFrame whose completeness is being dendrogrammed.
    :param method: The distance measure used for clustering, passed to
        scipy.hierarchy.
    :param filter: The filter to apply; one of "top", "bottom", or None.
    :param n: The cap on the number of columns in the filtered DataFrame.
    :param p: The cap on the percentage fill of the filtered columns.
    :param sort: One of "ascending", "descending", or None.
    :param figsize: matplotlib figure size; defaults to (25, 10).
    :param fontsize: The figure's font size.
    :param orientation: Dendrogram orientation; defaults to top-down for
        <= 50 columns, left-right otherwise.
    :param inline: Whether the figure is inline.  If not, the underlying
        matplotlib axis is returned instead of being plotted.
    :return: The axis object unless ``inline`` is True.
    """
    if not figsize:
        if len(df.columns) <= 50 or orientation in ('top', 'bottom'):
            figsize = (25, 10)
        else:
            figsize = (25, (25 + len(df.columns) - 50) * 0.5)
    plt.figure(figsize=figsize)
    gs = gridspec.GridSpec(1, 1)
    ax0 = plt.subplot(gs[0])
    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort)
    # Link the hierarchical output matrix, figure out orientation,
    # construct the base dendrogram.
    x = np.transpose(df.isnull().astype(int).values)
    z = hierarchy.linkage(x, method)
    if not orientation:
        orientation = 'left' if len(df.columns) > 50 else 'bottom'
    hierarchy.dendrogram(z, orientation=orientation,
                         labels=df.columns.tolist(),
                         distance_sort='descending',
                         link_color_func=lambda c: 'black',
                         leaf_font_size=fontsize, ax=ax0)
    # Remove extraneous default visual elements.
    ax0.set_aspect('auto')
    ax0.grid(b=False)
    if orientation == 'bottom':
        ax0.xaxis.tick_top()
    ax0.xaxis.set_ticks_position('none')
    ax0.yaxis.set_ticks_position('none')
    for side in ('top', 'right', 'bottom', 'left'):
        ax0.spines[side].set_visible(False)
    ax0.patch.set_visible(False)
    # Set up the categorical axis labels and draw.
    if orientation == 'bottom':
        ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=45,
                            ha='left')
    elif orientation == 'top':
        ax0.set_xticklabels(ax0.xaxis.get_majorticklabels(), rotation=45,
                            ha='right')
    if orientation in ('bottom', 'top'):
        ax0.tick_params(axis='y', labelsize=int(fontsize / 16 * 20))
    else:
        ax0.tick_params(axis='x', labelsize=int(fontsize / 16 * 20))
    if inline:
        plt.show()
    else:
        return ax0
def update_states_geo_zone_by_id(cls, states_geo_zone_id, states_geo_zone,
                                 **kwargs):
    """Update StatesGeoZone.

    Update attributes of StatesGeoZone.  This method makes a synchronous
    HTTP request by default.  To make an asynchronous HTTP request, pass
    async=True:

    >>> thread = api.update_states_geo_zone_by_id(states_geo_zone_id, states_geo_zone, async=True)
    >>> result = thread.get()

    :param async bool
    :param str states_geo_zone_id: ID of statesGeoZone to update. (required)
    :param StatesGeoZone states_geo_zone: Attributes of statesGeoZone to update. (required)
    :return: StatesGeoZone.  If the method is called asynchronously,
        returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both paths call the same helper; in async mode the helper's return
    # value is the request thread itself.
    return cls._update_states_geo_zone_by_id_with_http_info(
        states_geo_zone_id, states_geo_zone, **kwargs)
def delete(self, id):
    """Delete a component by id."""
    component_id = self.as_id(id)
    url = '%s/%s' % (self.api_url, component_id)
    response = self.http.delete(url, auth=self.auth)
    response.raise_for_status()
def to_json(graph: BELGraph) -> Mapping[str, Any]:
    """Convert this graph to a Node-Link JSON object."""
    result = node_link_data(graph)
    graph_meta = result['graph']
    # Annotation list definitions are sets; canonicalize to sorted lists.
    graph_meta[GRAPH_ANNOTATION_LIST] = {
        keyword: sorted(values)
        for keyword, values in
        graph_meta.get(GRAPH_ANNOTATION_LIST, {}).items()
    }
    # Convert the uncached-namespaces set to a list.
    graph_meta[GRAPH_UNCACHED_NAMESPACES] = list(
        graph_meta.get(GRAPH_UNCACHED_NAMESPACES, []))
    return result
def next_k_array(a):
    """Given an array `a` of k distinct nonnegative integers, sorted in
    ascending order, return the next k-array in the lexicographic ordering
    of the descending sequences of the elements [1]_.  `a` is modified in
    place.

    Parameters
    ----------
    a : ndarray(int, ndim=1)
        Array of length k.

    Returns
    -------
    a : ndarray(int, ndim=1)
        View of `a`.

    Examples
    --------
    Enumerate all the subsets with k elements of the set {0, ..., n-1}.

    >>> n, k = 4, 2
    >>> a = np.arange(k)
    >>> while a[-1] < n:
    ...     print(a)
    ...     a = next_k_array(a)
    [0 1]
    [0 2]
    [1 2]
    [0 3]
    [1 3]
    [2 3]

    References
    ----------
    .. [1] `Combinatorial number system
       <https://en.wikipedia.org/wiki/Combinatorial_number_system>`_,
       Wikipedia.
    """
    # Logic taken from Algorithm T in D. Knuth, The Art of Computer
    # Programming, Section 7.2.1.3 "Generating All Combinations".
    k = len(a)
    # Fast path: bump the smallest element while it stays below the next.
    if k == 1 or a[0] + 1 < a[1]:
        a[0] += 1
        return a
    a[0] = 0
    i = 1
    x = a[1] + 1
    # Walk right past any run of consecutive values, resetting them to the
    # smallest possible prefix as we go.
    while i < k - 1 and x == a[i + 1]:
        i += 1
        a[i - 1] = i - 1
        x = a[i] + 1
    a[i] = x
    return a
def text(self, selector):
    """Return text result(s) selected by the given css selector.

    :param selector: `str` css selector
    :return: `list` of strings for multiple matches, a single string for
        one match, or `None` for no match
    """
    matches = self.__bs4.select(selector)
    if not matches:
        return None
    if len(matches) > 1:
        return [node.get_text() for node in matches]
    return matches[0].get_text()
def get_media_detail_output_interface_interface_type(self, **kwargs):
    """Auto Generated Code"""
    # The initial "config" element is immediately replaced, mirroring the
    # structure of the generated code this follows.
    config = ET.Element("config")
    get_media_detail = ET.Element("get_media_detail")
    config = get_media_detail
    output = ET.SubElement(get_media_detail, "output")
    interface = ET.SubElement(output, "interface")
    name_key = ET.SubElement(interface, "interface-name")
    name_key.text = kwargs.pop('interface_name')
    iface_type = ET.SubElement(interface, "interface-type")
    iface_type.text = kwargs.pop('interface_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def values(self, *args: str, **kwargs: str) -> "ValuesQuery":
    """Make QuerySet return dicts instead of objects.

    Positional args select fields under their own name; keyword args
    select ``field`` under the alias ``return_as``.
    """
    fields_for_select: Dict[str, str] = {}
    # Positional fields alias to themselves; keyword pairs are
    # (alias, field).
    pairs = [(field, field) for field in args] + list(kwargs.items())
    for return_as, field in pairs:
        if return_as in fields_for_select:
            raise FieldError("Duplicate key {}".format(return_as))
        fields_for_select[return_as] = field
    return ValuesQuery(db=self._db, model=self.model,
                       q_objects=self._q_objects,
                       fields_for_select=fields_for_select,
                       distinct=self._distinct, limit=self._limit,
                       offset=self._offset, orderings=self._orderings,
                       annotations=self._annotations,
                       custom_filters=self._custom_filters)
def show_vcs_output_vcs_nodes_vcs_node_info_node_vcs_mode(self, **kwargs):
    """Auto Generated Code"""
    # The initial "config" element is immediately replaced, mirroring the
    # structure of the generated code this follows.
    config = ET.Element("config")
    show_vcs = ET.Element("show_vcs")
    config = show_vcs
    output = ET.SubElement(show_vcs, "output")
    vcs_nodes = ET.SubElement(output, "vcs-nodes")
    vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
    node_vcs_mode = ET.SubElement(vcs_node_info, "node-vcs-mode")
    node_vcs_mode.text = kwargs.pop('node_vcs_mode')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def request(self, path, args=None, post_args=None, files=None, method=None):
    """Fetch the given path in the Graph API.

    ``args`` is translated into a query string.  If ``post_args`` is
    given, a POST request is sent to the given path with those arguments.

    :raises GraphAPIError: on transport errors, error payloads, or
        unsupported response content types.
    """
    if args is None:
        args = dict()
    if post_args is not None:
        method = "POST"
    # Add `access_token` to post_args or args if it has not already been
    # included.
    if self.access_token:
        # If post_args exists, we assume that args either does not exist
        # or does not need `access_token`.
        if post_args and "access_token" not in post_args:
            post_args["access_token"] = self.access_token
        elif "access_token" not in args:
            args["access_token"] = self.access_token
    try:
        response = self.session.request(
            method or "GET",
            FACEBOOK_GRAPH_URL + path,
            timeout=self.timeout,
            params=args,
            data=post_args,
            proxies=self.proxies,
            files=files,
        )
    except requests.HTTPError as e:
        # Fixed: requests.HTTPError has no read(); the error payload is on
        # e.response.  (NOTE(review): session.request itself only raises
        # HTTPError if a response hook calls raise_for_status -- confirm.)
        payload = (e.response.json() if e.response is not None
                   else {"error": str(e)})
        raise GraphAPIError(payload)
    headers = response.headers
    if "json" in headers["content-type"]:
        result = response.json()
    elif "image/" in headers["content-type"]:
        result = {
            "data": response.content,
            "mime-type": headers["content-type"],
            "url": response.url,
        }
    elif "access_token" in parse_qs(response.text):
        query_str = parse_qs(response.text)
        if "access_token" in query_str:
            result = {"access_token": query_str["access_token"][0]}
            if "expires" in query_str:
                result["expires"] = query_str["expires"][0]
        else:
            # Unreachable given the elif guard above; kept for parity with
            # the original control flow.
            raise GraphAPIError(response.json())
    else:
        raise GraphAPIError("Maintype was not text, image, or querystring")
    if result and isinstance(result, dict) and result.get("error"):
        raise GraphAPIError(result)
    return result
def vertical_line(self, x: Union[int, float], y1: Union[int, float],
                  y2: Union[int, float], emphasize: bool = False) -> None:
    """Adds a line from (x, y1) to (x, y2)."""
    low, high = sorted([y1, y2])
    self.vertical_lines.append(_VerticalLine(x, low, high, emphasize))
def get_unit_hostnames(self, units):
    """Return a dict of juju unit names to hostnames."""
    host_names = {
        unit.info['unit_name']:
            str(unit.file_contents('/etc/hostname').strip())
        for unit in units
    }
    self.log.debug('Unit host names: {}'.format(host_names))
    return host_names
def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    escape = re.escape
    rules = [
        (len(environment.comment_start_string), 'comment',
         escape(environment.comment_start_string)),
        (len(environment.block_start_string), 'block',
         escape(environment.block_start_string)),
        (len(environment.variable_start_string), 'variable',
         escape(environment.variable_start_string)),
    ]
    if environment.line_statement_prefix is not None:
        rules.append((len(environment.line_statement_prefix),
                      'linestatement',
                      r'^\s*' + escape(environment.line_statement_prefix)))
    if environment.line_comment_prefix is not None:
        rules.append((len(environment.line_comment_prefix), 'linecomment',
                      r'(?:^|(?<=\S))[^\S\r\n]*'
                      + escape(environment.line_comment_prefix)))
    # Longest delimiters first, so shorter prefixes never shadow them.
    return [rule[1:] for rule in sorted(rules, reverse=True)]
def _get_tracker ( self , resource ) :
"""Return the resource tracker that is tracking ` ` resource ` ` .
: param resource : A resource .
: return : A resource tracker .
: rtype : : class : ` _ ResourceTracker `""" | with self . _lock :
for rt in self . _reference_queue :
if rt is not None and resource is rt . resource :
return rt
raise UnknownResourceError ( 'Resource not created by pool' ) |
def close(self, signalnum=None, frame=None):
    """Closes all currently open Tail objects and consumer processes.

    The (signalnum, frame) parameters allow this to double as a signal
    handler.
    """
    self._running = False
    self._log_debug("Closing all tail objects")
    self._active = False
    for fid in self._tails:
        self._tails[fid].close()
    for index in range(self._number_of_consumer_processes):
        proc = self._proc[index]
        if proc is not None and proc.is_alive():
            self._logger.debug("Terminate Process: " + str(index))
            proc.terminate()
            proc.join()
def get_corrector_f(rinput, meta, ins, datamodel):
    """Corrector for intensity flat."""
    from emirdrp.processing.flatfield import FlatFieldCorrector
    flat_info = meta['master_flat']
    with rinput.master_flat.open() as hdul:
        _logger.info('loading intensity flat')
        _logger.debug('flat info: %s', flat_info)
        mflat = hdul[0].data
        # Warn about suspicious flat values: negatives and non-finite
        # entries (NaN/inf).
        below_zero = mflat < 0
        not_finite = ~numpy.isfinite(mflat)
        if numpy.any(below_zero):
            _logger.warning('flat has %d values below 0', below_zero.sum())
        if numpy.any(not_finite):
            _logger.warning('flat has %d NaN', not_finite.sum())
        flat_corrector = FlatFieldCorrector(
            mflat, datamodel=datamodel,
            calibid=datamodel.get_imgid(hdul))
    return flat_corrector
def write_header(self, out_strm, delim, f1_num_fields, f2_num_fields,
                 f1_header=None, f2_header=None, missing_val=None):
    """Write the header for a joined file.

    If headers are provided for one or more of the input files, a header
    is generated for the output file.  Otherwise nothing is written.

    :param out_strm: write to this stream
    :param delim: field delimiter
    :param f1_num_fields: the number of columns in the first file
    :param f2_num_fields: the number of columns in the second file
    :param f1_header: header fields of the first file, if any
    :param f2_header: header fields of the second file, if any
    :param missing_val: placeholder used when exactly one header is missing
    :raises InvalidHeaderError: when exactly one header is present and no
        ``missing_val`` was provided.
    """
    headers_differ = f1_header != f2_header
    some_header_missing = f1_header is None or f2_header is None
    if headers_differ and some_header_missing and missing_val is None:
        raise InvalidHeaderError("Cannot generate output header when one "
                                 "input file is missing a header and no "
                                 "missing value was provided to replace "
                                 "unknown entries.")
    if f1_header is None and f2_header is None:
        return
    left = (f1_header if f1_header is not None
            else f1_num_fields * [missing_val])
    right = (f2_header if f2_header is not None
             else f2_num_fields * [missing_val])
    out_strm.write(delim.join(left) + delim + delim.join(right) + "\n")
def getEyeOutputViewport(self, eEye):
    """Gets the viewport in the frame buffer to draw the output of the
    distortion into."""
    fn = self.function_table.getEyeOutputViewport
    # Out-parameters filled in by the native call.
    x, y, width, height = (c_uint32(), c_uint32(), c_uint32(), c_uint32())
    fn(eEye, byref(x), byref(y), byref(width), byref(height))
    return x.value, y.value, width.value, height.value
def get_map_values(self, lons, lats, ibin=None):
    """Return the map values corresponding to a set of coordinates.

    Parameters
    ----------
    lons : array-like
        'Longitudes' (RA or GLON)
    lats : array-like
        'Latitudes' (DEC or GLAT)
    ibin : int or array-like
        Extract data only for a given energy bin.  None -> extract data
        for all bins.

    Returns
    -------
    vals : numpy.ndarray((n))
        Values of pixels in the flattened map, np.nan used to flag coords
        outside of map.
    """
    # Convert sky coordinates to HEALPix (theta, phi) angles.
    theta = np.pi / 2. - np.radians(lats)
    phi = np.radians(lons)
    pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)
    if self.data.ndim != 2:
        return self.data[pix]
    return self.data[:, pix] if ibin is None else self.data[ibin, pix]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.