signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _build_filename_from_browserstack_json ( j ) :
"""Build a useful filename for an image from the screenshot json metadata""" | filename = ''
device = j [ 'device' ] if j [ 'device' ] else 'Desktop'
if j [ 'state' ] == 'done' and j [ 'image_url' ] :
detail = [ device , j [ 'os' ] , j [ 'os_version' ] , j [ 'browser' ] , j [ 'browser_version' ] , '.jpg' ]
filename = '_' . join ( item . replace ( " " , "_" ) for item in detail if item )
else :
print 'screenshot timed out, ignoring this result'
return filename |
def login(self, username, password, token):
    """Log in to Salesforce.com and start a client session.

    Unlike other toolkits, ``token`` is a separate parameter, because
    Salesforce doesn't explicitly tell you to append it when it gives you a
    login error; folks that are new to the API may not know this.

    :param username: Username
    :param password: Password
    :param token: security token (appended to the password on the wire)
    :return: LoginResult
    """
    self._setHeaders('login')
    # Salesforce expects the security token concatenated onto the password.
    result = self._sforce.service.login(username, password + token)
    # set session header
    header = self.generateHeader('SessionHeader')
    header.sessionId = result['sessionId']
    self.setSessionHeader(header)
    # Cache session identifiers for subsequent authenticated calls.
    self._sessionId = result['sessionId']
    self._userId = result['userId']
    self._metadataServerUrl = result['metadataServerUrl']
    # change URL to point from test.salesforce.com to something like
    # cs2-api.salesforce.com
    self._setEndpoint(result['serverUrl'])
    # na0.salesforce.com (a.k.a. ssl.salesforce.com) requires ISO-8859-1
    # instead of UTF-8; currently, UTF-8 is hard-coded in Suds, so this
    # can't be implemented yet.
    if 'ssl.salesforce.com' in result['serverUrl'] or 'na0.salesforce.com' in result['serverUrl']:
        pass
    return result
def List(self, *branches, **kwargs):
    """Split the computation into branches and collect the results in a list.

    While `Seq` is sequential, `List` applies every branch expression to the
    same input and returns one result per branch.  The expression

        List(f0, f1, ..., fn)

    compiles to a function equivalent to

        lambda x: [f0(x), f1(x), ..., fn(x)]

    Composed with `Seq`, ``Seq(f, List(g, h))`` behaves like
    ``List(Seq(f, g), Seq(f, h))`` except that ``f`` is evaluated only once,
    so the two yield the same result when ``f`` is a pure function.

    Any iterable (implements ``__iter__``) that is not a tuple and yields
    valid expressions may be used in place of a list literal.

    Check out the documentation for Phi lambdas:
    https://cgarciae.github.io/phi/lambdas.m.html
    """
    compiled = [_parse(branch)._f for branch in branches]

    def run_branches(x, state):
        # Thread the state through every branch, collecting each output.
        results = []
        for branch_fn in compiled:
            out, state = branch_fn(x, state)
            results.append(out)
        return results, state

    return self.__then__(run_branches, **kwargs)
def _adjust_width ( self ) :
"""Shrinks bar if number of iterations is less than the bar width""" | if self . bar_width > self . max_iter :
self . bar_width = int ( self . max_iter ) |
def _calc ( self , x , y ) :
"""List based implementation of binary tree algorithm for concordance
measure after : cite : ` Christensen2005 ` .""" | x = np . array ( x )
y = np . array ( y )
n = len ( y )
perm = list ( range ( n ) )
perm . sort ( key = lambda a : ( x [ a ] , y [ a ] ) )
vals = y [ perm ]
ExtraY = 0
ExtraX = 0
ACount = 0
BCount = 0
CCount = 0
DCount = 0
ECount = 0
DCount = 0
Concordant = 0
Discordant = 0
# ids for left child
li = [ None ] * ( n - 1 )
# ids for right child
ri = [ None ] * ( n - 1 )
# number of left descendants for a node
ld = np . zeros ( n )
# number of values equal to value i
nequal = np . zeros ( n )
for i in range ( 1 , n ) :
NumBefore = 0
NumEqual = 1
root = 0
x0 = x [ perm [ i - 1 ] ]
y0 = y [ perm [ i - 1 ] ]
x1 = x [ perm [ i ] ]
y1 = y [ perm [ i ] ]
if x0 != x1 :
DCount = 0
ECount = 1
else :
if y0 == y1 :
ECount += 1
else :
DCount += ECount
ECount = 1
root = 0
inserting = True
while inserting :
current = y [ perm [ i ] ]
if current > y [ perm [ root ] ] : # right branch
NumBefore += 1 + ld [ root ] + nequal [ root ]
if ri [ root ] is None : # insert as right child to root
ri [ root ] = i
inserting = False
else :
root = ri [ root ]
elif current < y [ perm [ root ] ] : # increment number of left descendants
ld [ root ] += 1
if li [ root ] is None : # insert as left child to root
li [ root ] = i
inserting = False
else :
root = li [ root ]
elif current == y [ perm [ root ] ] :
NumBefore += ld [ root ]
NumEqual += nequal [ root ] + 1
nequal [ root ] += 1
inserting = False
ACount = NumBefore - DCount
BCount = NumEqual - ECount
CCount = i - ( ACount + BCount + DCount + ECount - 1 )
ExtraY += DCount
ExtraX += BCount
Concordant += ACount
Discordant += CCount
cd = Concordant + Discordant
num = Concordant - Discordant
tau = num / np . sqrt ( ( cd + ExtraX ) * ( cd + ExtraY ) )
v = ( 4. * n + 10 ) / ( 9. * n * ( n - 1 ) )
z = tau / np . sqrt ( v )
pval = erfc ( np . abs ( z ) / 1.4142136 )
# follow scipy
return tau , pval , Concordant , Discordant , ExtraX , ExtraY |
def is_the_same_indel(self, other_record, ref_seq):
    """Returns True iff this record and other_record are the "same" indel.

    At repeats, there is more than one way to report the same variant. eg:
        pos=42, ref=CAAA, alt=CAA
        pos=43, ref=AAA,  alt=AA
        pos=44, ref=A A,  alt=A

    :param other_record: record to compare against
    :param ref_seq: reference sequence used to pad both records to a common span
    """
    # Only comparable when both are single-ALT, non-SNP records on the same CHROM.
    if self.CHROM != other_record.CHROM or len(self.ALT) > 1 or len(other_record.ALT) > 1 or self.is_snp() or other_record.is_snp():
        return False
    # The number of nucleotides that have been added or removed
    # is a necessary condition of the indels being the same,
    # so check that before delving into the actual sequences.
    if (len(self.REF) - len(self.ALT[0])) != (len(other_record.REF) - len(other_record.ALT[0])):
        return False
    # Make records that start and end in the same place,
    # then compare the REF and ALT sequences.
    record1 = copy.copy(self)
    record2 = copy.copy(other_record)
    new_start = min(self.POS, other_record.POS)
    new_end = max(self.ref_end_pos(), other_record.ref_end_pos())
    record1.add_flanking_seqs(ref_seq, new_start, new_end)
    record2.add_flanking_seqs(ref_seq, new_start, new_end)
    return record1.REF == record2.REF and record1.ALT == record2.ALT
def cnst_AT(self, Y):
    r"""Compute :math:`A^T \mathbf{y}`. In this case
    :math:`A^T \mathbf{y} = (I \;\; \Gamma_0^T \;\; \Gamma_1^T \;\;
    \ldots) \mathbf{y}`.
    """
    identity_part = self.cnst_A0T(self.block_sep0(Y))
    gamma_part = np.sum(self.cnst_A1T(self.block_sep1(Y)), axis=-1)
    return identity_part + gamma_part
def checkGeneTreeMatchesSpeciesTree(speciesTree, geneTree, processID):
    """Check that ids in the gene tree all match nodes in the species tree.

    :param speciesTree: binary tree with ``internal``, ``left``, ``right``
        and leaf ``iD`` attributes
    :param geneTree: tree of the same shape/type as ``speciesTree``
    :param processID: callable applied to each leaf ``iD`` before comparison;
        its results must be hashable
    :raises AssertionError: if a gene-tree leaf id is absent from the species tree
    """
    def _collect_leaf_ids(tree, acc):
        # Recursively gather processed leaf ids into acc.
        if tree.internal:
            _collect_leaf_ids(tree.left, acc)
            _collect_leaf_ids(tree.right, acc)
        else:
            acc.append(processID(tree.iD))

    species_ids = []
    _collect_leaf_ids(speciesTree, species_ids)
    gene_ids = []
    _collect_leaf_ids(geneTree, gene_ids)
    # PERF FIX: use a set for O(1) membership tests instead of an O(n) list
    # scan per gene-tree leaf (was O(n^2) overall).
    species_id_set = set(species_ids)
    for leaf_id in gene_ids:
        assert leaf_id in species_id_set
def create_inline(project, resource, offset):
    """Create a refactoring object for inlining.

    Based on ``resource`` and ``offset`` it returns an instance of
    ``InlineMethod``, ``InlineVariable`` or ``InlineParameter``.

    :raises rope.base.exceptions.RefactoringError: if the name at ``offset``
        is not a method, local variable or parameter
    """
    pyname = _get_pyname(project, resource, offset)
    message = 'Inline refactoring should be performed on ' 'a method, local variable or parameter.'
    if pyname is None:
        raise rope.base.exceptions.RefactoringError(message)
    # Resolve imported names to the thing they actually refer to.
    if isinstance(pyname, pynames.ImportedName):
        pyname = pyname._get_imported_pyname()
    # Dispatch on what kind of name sits at the offset.
    if isinstance(pyname, pynames.AssignedName):
        return InlineVariable(project, resource, offset)
    if isinstance(pyname, pynames.ParameterName):
        return InlineParameter(project, resource, offset)
    if isinstance(pyname.get_object(), pyobjects.PyFunction):
        return InlineMethod(project, resource, offset)
    else:
        raise rope.base.exceptions.RefactoringError(message)
def format_string(string, foreground=None, background=None, reset=True, bold=False, faint=False, italic=False, underline=False, blink=False, inverted=False):
    """Returns a Unicode string formatted with an ANSI escape sequence.

    string
        String to format
    foreground
        Foreground colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    background
        Background colour to use. Accepted types: None, int (xterm
        palette ID), tuple (RGB, RGBA), Colour
    reset
        Reset the formatting at the end (default: True)
    bold
        Enable bold text (default: False)
    faint
        Enable faint text (default: False)
    italic
        Enable italic text (default: False)
    underline
        Enable underlined text (default: False)
    blink
        Enable blinky text (default: False)
    inverted
        Enable inverted text (default: False)
    """
    prefix = format_escape(foreground, background, bold, faint, italic, underline, blink, inverted)
    if reset:
        suffix = ANSI_FORMAT_RESET
    else:
        suffix = ''
    return '{}{}{}'.format(prefix, string, suffix)
def _get_connection(self):
    """Returns connection to the postgres database.

    Returns:
        connection to postgres database who stores mpr data.
    """
    # Lazily create the connection on first use and cache it on the instance.
    if not getattr(self, '_connection', None):
        logger.debug('Creating new connection.\n dsn: {}'.format(self._dsn))
        d = parse_url_to_dict(self._dsn)
        self._connection = psycopg2.connect(database=d['path'].strip('/'), user=d['username'], password=d['password'], port=d['port'], host=d['hostname'])
        # It takes some time to find the way how to get raw connection from sqlalchemy. So,
        # I leave the commented code.
        # self._engine = create_engine(self._dsn)
        # self._connection = self._engine.raw_connection()
    return self._connection
def decode(self, codes):
    """Given PQ-codes, reconstruct original D-dimensional vectors via
    :func:`PQ.decode`, and apply an inverse rotation.

    Args:
        codes (np.ndarray): PQ-codes with shape=(N, M) and dtype=self.code_dtype.
            Each row is a PQ-code.

    Returns:
        np.ndarray: Reconstructed vectors with shape=(N, D) and dtype=np.float32
    """
    # Because R is a rotation matrix (R^t * R = I), R^-1 equals R^t.
    reconstructed = self.pq.decode(codes)
    return reconstructed @ self.R.T
def _resource_index ( self , resource ) :
"""Get index for given resource .
by default it will be ` self . index ` , but it can be overriden via app . config
: param resource : resource name""" | datasource = self . get_datasource ( resource )
indexes = self . _resource_config ( resource , 'INDEXES' ) or { }
default_index = self . _resource_config ( resource , 'INDEX' )
return indexes . get ( datasource [ 0 ] , default_index ) |
def ping(self):
    """Check connectivity to InfluxDB.

    :returns: The version of the InfluxDB the client is connected to
    """
    res = self.request(url="ping", method='GET', expected_response_code=204)
    version = res.headers['X-Influxdb-Version']
    return version
def _get_gene_info(self, limit):
    """Currently loops through the gene_info file and
    creates the genes as classes, typed with SO. It will add their label,
    any alternate labels as synonyms, alternate ids as equivalent classes.
    HPRDs get added as protein products.
    The chromosome and chr band get added as blank node regions,
    and the gene is faldo:located on the chr band.

    :param limit: maximum number of data rows to fully process (None for all)
    :return: None
    """
    src_key = 'gene_info'
    # Test mode writes to a separate graph.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    geno = Genotype(graph)
    model = Model(graph)
    # not unzipping the file
    LOG.info("Processing 'Gene Info' records")
    line_counter = 0
    gene_info = '/'.join((self.rawdir, self.files[src_key]['file']))
    LOG.info("FILE: %s", gene_info)
    # Add taxa and genome classes for those in our filter.
    # Regex for cytogenetic band designations like 11q23 or Xp22.3.
    band_regex = re.compile(r'[0-9A-Z]+[pq](\d+)?(\.\d+)?$')
    for tax_num in self.tax_ids:
        tax_id = ':'.join(('NCBITaxon', tax_num))
        # tax label can get added elsewhere
        geno.addGenome(tax_id, tax_num)
        # label added elsewhere
        model.addClassToGraph(tax_id, None)
    col = self.files[src_key]['columns']
    with gzip.open(gene_info, 'rb') as tsv:
        # First line is the header; strip the leading comment character.
        row = tsv.readline().decode().strip().split('\t')
        row[0] = row[0][1:]
        # strip comment
        if col != row:
            LOG.info('%s\nExpected Headers:\t%s\nRecived Headers:\t%s\n', src_key, col, row)
            LOG.info(set(col) - set(row))
        for line in tsv:
            line = line.strip()
            line_counter += 1
            # NOTE(review): `line` is bytes here (file opened 'rb'), so
            # `line[0] == '#'` compares an int to a str under Python 3 and
            # never matches — verify whether comment rows can occur in data.
            if line[0] == '#':  # skip comments
                continue
            row = line.decode().strip().split('\t')
            # ## set filter=None in init if you don't want to have a filter
            # if self.id_filter is not None:
            #     if ((self.id_filter == 'taxids' and
            #          (tax_num not in self.tax_ids))
            #             or (self.id_filter == 'geneids' and
            #                 (int(gene_num) not in self.gene_ids))):
            #         continue
            # #### end filter
            gene_num = row[col.index('GeneID')]
            # In test mode, only keep the whitelisted gene ids.
            if self.test_mode and int(gene_num) not in self.gene_ids:
                continue
            tax_num = row[col.index('tax_id')]
            # Outside test mode, only keep the configured taxa.
            if not self.test_mode and tax_num not in self.tax_ids:
                continue
            tax_id = ':'.join(('NCBITaxon', tax_num))
            gene_id = ':'.join(('NCBIGene', gene_num))
            gtype = row[col.index('type_of_gene')].strip()
            gene_type_id = self.resolve(gtype)
            symbol = row[col.index('Symbol')]
            # NEWENTRY is a placeholder symbol, not a real label.
            if symbol == 'NEWENTRY':
                label = None
            else:
                label = symbol
            # sequence feature, not a gene: model as individual, else class
            if gene_type_id == self.globaltt['sequence_feature']:
                self.class_or_indiv[gene_id] = 'I'
            else:
                self.class_or_indiv[gene_id] = 'C'
            # Rows past the limit are still counted but not fully processed.
            if not self.test_mode and limit is not None and line_counter > limit:
                continue
            desc = row[col.index('description')]
            if self.class_or_indiv[gene_id] == 'C':
                model.addClassToGraph(gene_id, label, gene_type_id, desc)
                # NCBI will be the default leader (for non mods),
                # so we will not add the leader designation here.
            else:
                model.addIndividualToGraph(gene_id, label, gene_type_id, desc)
                # in this case, they aren't genes.
                # so we want someone else to be the leader
            name = row[col.index('Full_name_from_nomenclature_authority')]
            if name != '-':
                model.addSynonym(gene_id, name)
            synonyms = row[col.index('Synonyms')].strip()
            if synonyms != '-':
                for syn in synonyms.split('|'):
                    model.addSynonym(gene_id, syn.strip(), model.globaltt['has_related_synonym'])
            other_designations = row[col.index('Other_designations')].strip()
            if other_designations != '-':
                for syn in other_designations.split('|'):
                    model.addSynonym(gene_id, syn.strip(), model.globaltt['has_related_synonym'])
            dbxrefs = row[col.index('dbXrefs')].strip()
            if dbxrefs != '-':
                self._add_gene_equivalencies(dbxrefs, gene_id, tax_id)
            # edge cases of id|symbol|chr|map_loc:
            #   263     AMD1P2    X|Y  with Xq28 and Yq12
            #   438     ASMT      X|Y  with Xp22.3 or Yp11.3   # in PAR
            #   no idea why there's two bands listed - possibly 2 assemblies
            #   419     ART3      4    with 4q21.1|4p15.1-p14
            #   28227   PPP2R3B   X|Y  Xp22.33; Yp11.3         # in PAR
            #   this is of "unknown" type == susceptibility
            #   619538  OMS       10|19|3  10q26.3;19q13.42-q13.43;3p25.3
            #   unlocated scaffold
            #   101928066  LOC101928066  1|Un
            #   mouse --> 2C3
            #   11435   Chrna1    2    2 C3|2 43.76 cM
            #   mouse --> 11B1.1
            #   11548   Adra1b    11   11 B1.1|11 25.81 cM
            #   11717   Ampd3     7    7 57.85 cM|7 E2-E3      # mouse
            #   14421   B4galnt1  10   10 D3|10 74.5 cM        # mouse
            #   323212  wu:fb92e12  19|20   -                  # fish
            #   323368  ints10      6|18    -                  # fish
            #   323666  wu:fc06e02  11|23   -                  # fish
            # feel that the chr placement can't be trusted in this table
            # when there is >1 listed
            # with the exception of human X|Y,
            # we will only take those that align to one chr
            # FIXME remove the chr mapping below
            # when we pull in the genomic coords
            chrom = row[col.index('chromosome')].strip()
            if chrom != '-' and chrom != '':
                if re.search(r'\|', chrom) and chrom not in ['X|Y', 'X; Y']:
                    # means that there's uncertainty in the mapping, so skip it
                    # TODO we'll need to figure out how to deal with
                    # >1 loc mapping
                    LOG.info('%s is non-uniquely mapped to %s. Skipping for now.', gene_id, chrom)
                    continue
                # X|YXp22.33; Yp11.3
                # if (not re.match(
                #         r'(\d+|(MT)|[XY]|(Un)$', str(chr).strip())):
                #     print('odd chr=', str(chr))
                if chrom == 'X; Y':
                    chrom = 'X|Y'
                    # rewrite the PAR regions for processing
                # do this in a loop to allow PAR regions like X|Y
                for chromosome in re.split(r'\|', chrom):
                    # assume that the chromosome label is added elsewhere
                    geno.addChromosomeClass(chromosome, tax_id, None)
                    mychrom = makeChromID(chromosome, tax_num, 'CHR')
                    # temporarily use taxnum for the disambiguating label
                    mychrom_syn = makeChromLabel(chromosome, tax_num)
                    model.addSynonym(mychrom, mychrom_syn)
                    map_loc = row[col.index('map_location')].strip()
                    band_match = re.match(band_regex, map_loc)
                    if band_match is not None and len(band_match.groups()) > 0:
                        # if tax_num != '9606':
                        #     continue
                        # this matches the regular kind of chrs,
                        # so make that kind of band
                        # not sure why this matches?
                        #   chrX|Y or 10090chr12|Un"
                        # TODO we probably need a different regex per organism
                        # the maploc_id already has the numeric chromosome
                        # in it, strip it first
                        bid = re.sub(r'^' + chromosome, '', map_loc)
                        # the generic location (no coordinates)
                        maploc_id = makeChromID(chromosome + bid, tax_num, 'CHR')
                        # print(map_loc, '-->', bid, '-->', maploc_id)
                        # Assume its type will be added elsewhere
                        band = Feature(graph, maploc_id, None, None)
                        band.addFeatureToGraph()
                        # add the band as the containing feature
                        graph.addTriple(gene_id, self.globaltt['is subsequence of'], maploc_id)
                    else:
                        # TODO handle these cases: examples are:
                        #   15q11-q22, Xp21.2-p11.23, 15q22-qter, 10q11.1-q24,
                        #   12p13.3-p13.2|12p13-p12, 1p13.3|1p21.3-p13.1,
                        #   12cen-q21, 22q13.3|22q13.3
                        LOG.debug('not regular band pattern for %s: %s', gene_id, map_loc)
                    # add the gene as a subsequence of the chromosome
                    graph.addTriple(gene_id, self.globaltt['is subsequence of'], mychrom)
            geno.addTaxon(tax_id, gene_id)
    return
def clear_data(self):
    """Removes the content data.

    raise: NoAccess - ``Metadata.isRequired()`` is ``true`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*
    """
    # cjshaw@mit.edu, Jan 9, 2015
    # Removes the item from AWS S3 and resets URL to ''
    odl_repo, url = get_aws_s3_handle(self._config_map)
    existing_url = self._payload.get_url_metadata().get_existing_string_values()[0]
    # try to clear from payload first, in case that fails we won't mess with AWS
    self._payload.clear_url()
    # The S3 object key is the URL path relative to the bucket base URL.
    key_path = existing_url.replace(url, '')
    # for boto3, remove leading /
    if key_path[0] == '/':
        key_path = key_path[1::]
    odl_repo.delete_object(Bucket=self._config_map['s3_bucket'], Key=key_path)
def create_cmd(self, args):
    """'create' sub-command dispatcher.

    :param args: cli arguments
    :return: result of the sub-command handler, or None for unknown commands
    """
    sub_command = args.get('cmd_create')
    if sub_command != 'conf':
        print("Error: Create %s is invalid or not implemented" % sub_command)
        return None
    return self.load_xml_conf(args['conf_file'], args['id'])
def Main():
    """The main function.

    Returns:
        bool: True if successful or False otherwise.
    """
    tool = image_export_tool.ImageExportTool()
    if not tool.ParseArguments():
        return False
    # Listing signature identifiers is an informational mode; exit successfully.
    if tool.list_signature_identifiers:
        tool.ListSignatureIdentifiers()
        return True
    if not tool.has_filters:
        logging.warning('No filter defined exporting all files.')
    # TODO: print more status information like PrintOptions.
    tool.PrintFilterCollection()
    try:
        tool.ProcessSources()
    except (KeyboardInterrupt, errors.UserAbort):
        logging.warning('Aborted by user.')
        return False
    except errors.BadConfigOption as exception:
        logging.warning(exception)
        return False
    except errors.SourceScannerError as exception:
        logging.warning(('Unable to scan for a supported filesystem with error: {0!s}\n' 'Most likely the image format is not supported by the ' 'tool.').format(exception))
        return False
    return True
def _add_tip_during_transfer ( self , tips , ** kwargs ) :
"""Performs a : any : ` pick _ up _ tip ` when running a : any : ` transfer ` ,
: any : ` distribute ` , or : any : ` consolidate ` .""" | if self . has_tip_rack ( ) and tips > 0 and not self . current_tip ( ) :
self . pick_up_tip ( ) |
def raw(self, use_local=True):
    """Load the raw dataset from remote URL or local file.

    Parameters
    ----------
    use_local : boolean
        If True (default), then attempt to load the dataset locally. If
        False or if the dataset is not available locally, then load the
        data from an external URL.
    """
    if use_local and self.is_local:
        # Bundled copy shipped inside the vega_datasets package.
        return pkgutil.get_data('vega_datasets', self.pkg_filename)
    else:
        # Fall back to fetching the raw bytes over the network.
        return urlopen(self.url).read()
def load_tensor(f, format=None):  # type: (Union[IO[bytes], Text], Optional[Any]) -> TensorProto
    '''Loads a serialized TensorProto into memory

    @params
    f can be a file-like object (has "read" function) or a string containing a file name
    format is for future use

    @return
    Loaded in-memory TensorProto
    '''
    # Read the raw bytes and deserialize in one step.
    return load_tensor_from_string(_load_bytes(f), format=format)
def send_game(self, chat_id: Union[int, str], game_short_name: str, disable_notification: bool = None, reply_to_message_id: int = None, reply_markup: Union["pyrogram.InlineKeyboardMarkup", "pyrogram.ReplyKeyboardMarkup", "pyrogram.ReplyKeyboardRemove", "pyrogram.ForceReply"] = None) -> "pyrogram.Message":
    """Use this method to send a game.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.
            For your personal cloud (Saved Messages) you can simply use "me" or "self".
            For a contact that exists in your Telegram address book you can use his phone number (str).

        game_short_name (``str``):
            Short name of the game, serves as the unique identifier for the game. Set up your games via Botfather.

        disable_notification (``bool``, *optional*):
            Sends the message silently.
            Users will receive a notification with no sound.

        reply_to_message_id (``int``, *optional*):
            If the message is a reply, ID of the original message.

        reply_markup (:obj:`InlineKeyboardMarkup`, *optional*):
            An object for an inline keyboard. If empty, one 'Play game_title' button will be shown automatically.
            If not empty, the first button must launch the game.

    Returns:
        On success, the sent :obj:`Message` is returned.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    # Send the game via a raw MTProto SendMedia call with an InputMediaGame.
    r = self.send(functions.messages.SendMedia(peer=self.resolve_peer(chat_id), media=types.InputMediaGame(id=types.InputGameShortName(bot_id=types.InputUserSelf(), short_name=game_short_name), ), message="", silent=disable_notification or None, reply_to_msg_id=reply_to_message_id, random_id=self.rnd_id(), reply_markup=reply_markup.write() if reply_markup else None))
    # Find the new-message update in the response and parse it into a
    # high-level Message object.
    for i in r.updates:
        if isinstance(i, (types.UpdateNewMessage, types.UpdateNewChannelMessage)):
            return pyrogram.Message._parse(self, i.message, {i.id: i for i in r.users}, {i.id: i for i in r.chats})
def extract_subtree(self, node):
    '''Return a copy of the subtree rooted at ``node``

    Args:
        ``node`` (``Node``): The root of the desired subtree

    Returns:
        ``Tree``: A copy of the subtree rooted at ``node``
    '''
    if not isinstance(node, Node):
        raise TypeError("node must be a Node")
    # Temporarily re-root the tree at ``node`` so copy() captures only the
    # subtree, then restore the original root.
    original_root = self.root
    self.root = node
    subtree = copy(self)
    self.root = original_root
    return subtree
def make_arrow(self, portal):
    """Make an :class:`Arrow` to represent a :class:`Portal`, store it,
    and return it.
    """
    origin = portal["origin"]
    destination = portal["destination"]
    # Both endpoints must already have Spots.
    if origin not in self.spot or destination not in self.spot:
        raise ValueError("An :class:`Arrow` should only be made after " "the :class:`Spot`s it connects")
    # Refuse duplicates for the same origin/destination pair.
    if origin in self.arrow and destination in self.arrow[origin]:
        raise KeyError("Already have an Arrow for this Portal")
    return self._core_make_arrow(portal, self.spot[origin], self.spot[destination], self.arrow)
def _analyze_txt_config(self, config_string=None):
    """Analyze the given container and return the corresponding job.

    If ``config_string`` is ``None``,
    try reading it from the TXT config file inside the container.

    :param string config_string: the configuration string
    :rtype: :class:`~aeneas.job.Job`
    """
    self.log(u"Analyzing container with TXT config string")
    if config_string is None:
        # No explicit config given: read it from the container's TXT entry.
        self.log(u"Analyzing container with TXT config file")
        config_entry = self.container.entry_config_txt
        self.log([u"Found TXT config entry '%s'", config_entry])
        config_dir = os.path.dirname(config_entry)
        self.log([u"Directory of TXT config entry: '%s'", config_dir])
        self.log([u"Reading TXT config entry: '%s'", config_entry])
        config_contents = self.container.read_entry(config_entry)
        self.log(u"Converting config contents to config string")
        config_contents = gf.safe_unicode(config_contents)
        config_string = gf.config_txt_to_string(config_contents)
    else:
        self.log([u"Analyzing container with TXT config string '%s'", config_string])
        config_dir = ""
    self.log(u"Creating the Job object")
    job = Job(config_string)
    self.log(u"Getting entries")
    entries = self.container.entries
    self.log(u"Converting config string into config dict")
    parameters = gf.config_string_to_dict(config_string)
    self.log(u"Calculating the path of the tasks root directory")
    tasks_root_directory = gf.norm_join(config_dir, parameters[gc.PPN_JOB_IS_HIERARCHY_PREFIX])
    self.log([u"Path of the tasks root directory: '%s'", tasks_root_directory])
    self.log(u"Calculating the path of the sync map root directory")
    sync_map_root_directory = gf.norm_join(config_dir, parameters[gc.PPN_JOB_OS_HIERARCHY_PREFIX])
    job_os_hierarchy_type = parameters[gc.PPN_JOB_OS_HIERARCHY_TYPE]
    self.log([u"Path of the sync map root directory: '%s'", sync_map_root_directory])
    # Compile the text/audio file name regexes from the configuration.
    text_file_relative_path = parameters[gc.PPN_JOB_IS_TEXT_FILE_RELATIVE_PATH]
    self.log([u"Relative path for text file: '%s'", text_file_relative_path])
    text_file_name_regex = re.compile(r"" + parameters[gc.PPN_JOB_IS_TEXT_FILE_NAME_REGEX])
    self.log([u"Regex for text file: '%s'", parameters[gc.PPN_JOB_IS_TEXT_FILE_NAME_REGEX]])
    audio_file_relative_path = parameters[gc.PPN_JOB_IS_AUDIO_FILE_RELATIVE_PATH]
    self.log([u"Relative path for audio file: '%s'", audio_file_relative_path])
    audio_file_name_regex = re.compile(r"" + parameters[gc.PPN_JOB_IS_AUDIO_FILE_NAME_REGEX])
    self.log([u"Regex for audio file: '%s'", parameters[gc.PPN_JOB_IS_AUDIO_FILE_NAME_REGEX]])
    if parameters[gc.PPN_JOB_IS_HIERARCHY_TYPE] == HierarchyType.FLAT:
        # Flat hierarchy: match text/audio pairs directly under the tasks root.
        self.log(u"Looking for text/audio pairs in flat hierarchy")
        text_files = self._find_files(entries, tasks_root_directory, text_file_relative_path, text_file_name_regex)
        self.log([u"Found text files: '%s'", text_files])
        audio_files = self._find_files(entries, tasks_root_directory, audio_file_relative_path, audio_file_name_regex)
        self.log([u"Found audio files: '%s'", audio_files])
        self.log(u"Matching files in flat hierarchy...")
        matched_tasks = self._match_files_flat_hierarchy(text_files, audio_files)
        self.log(u"Matching files in flat hierarchy... done")
        for task_info in matched_tasks:
            self.log([u"Creating task: '%s'", str(task_info)])
            task = self._create_task(task_info, config_string, sync_map_root_directory, job_os_hierarchy_type)
            job.add_task(task)
    if parameters[gc.PPN_JOB_IS_HIERARCHY_TYPE] == HierarchyType.PAGED:
        self.log(u"Looking for text/audio pairs in paged hierarchy")
        # find all subdirectories of tasks_root_directory
        # that match gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX
        matched_directories = self._match_directories(entries, tasks_root_directory, parameters[gc.PPN_JOB_IS_TASK_DIRECTORY_NAME_REGEX])
        for matched_directory in matched_directories:
            # rebuild the full path
            matched_directory_full_path = gf.norm_join(tasks_root_directory, matched_directory)
            self.log([u"Looking for text/audio pairs in directory '%s'", matched_directory_full_path])
            # look for text and audio files there
            text_files = self._find_files(entries, matched_directory_full_path, text_file_relative_path, text_file_name_regex)
            self.log([u"Found text files: '%s'", text_files])
            audio_files = self._find_files(entries, matched_directory_full_path, audio_file_relative_path, audio_file_name_regex)
            self.log([u"Found audio files: '%s'", audio_files])
            # if we have found exactly one text and one audio file,
            # create a Task
            if (len(text_files) == 1) and (len(audio_files) == 1):
                self.log([u"Exactly one text file and one audio file in '%s'", matched_directory])
                task_info = [matched_directory, text_files[0], audio_files[0]]
                self.log([u"Creating task: '%s'", str(task_info)])
                task = self._create_task(task_info, config_string, sync_map_root_directory, job_os_hierarchy_type)
                job.add_task(task)
            elif len(text_files) > 1:
                self.log([u"More than one text file in '%s'", matched_directory])
            elif len(audio_files) > 1:
                self.log([u"More than one audio file in '%s'", matched_directory])
            else:
                self.log([u"No text nor audio file in '%s'", matched_directory])
    return job
def request(self, method, endpoint, payload=None, timeout=5):
    """Send an HTTP request to the API, renewing the auth token on 401.

    :param method: HTTP method name (e.g. 'GET', 'POST').
    :param endpoint: path appended to ``self.api_url``.
    :param payload: optional object to JSON-encode as the request body.
    :param timeout: per-request timeout in seconds.
    :returns: the ``requests`` response object, or ``None`` on connection
        error, timeout, or failed re-login.
    """
    url = self.api_url + endpoint
    data = None
    headers = {}
    if payload is not None:
        # JSON-encode the body and advertise the content type.
        data = json.dumps(payload)
        headers['Content-Type'] = 'application/json'
    try:
        if self.auth_token is not None:
            headers[API_AUTH_HEADER] = self.auth_token
        response = self.session.request(method, url, data=data, headers=headers, timeout=timeout)
        # Any status other than 401 (unauthorized) is returned as-is.
        if response.status_code != 401:
            return response
        _LOGGER.debug("Renewing auth token")
        # Token rejected: re-authenticate, give up if login fails.
        if not self.login(timeout=timeout):
            return None
        # Retry request with the freshly renewed token.
        headers[API_AUTH_HEADER] = self.auth_token
        return self.session.request(method, url, data=data, headers=headers, timeout=timeout)
    except requests.exceptions.ConnectionError:
        _LOGGER.warning("Unable to connect to %s", url)
    except requests.exceptions.Timeout:
        _LOGGER.warning("No response from %s", url)
    # Reached only on connection error / timeout.
    return None
def get_response(url, plugins, timeout=SPLASH_TIMEOUT):
    """Return response with HAR, inline scripts and software detected by JS matchers.

    :param url: page URL to analyze via Splash.
    :param plugins: plugin definitions used to build the Lua script.
    :param timeout: Splash-side timeout passed in the query string.
    :rtype: dict with 'har', 'scripts' and 'softwares' keys.
    :raises SplashError: if Splash is unreachable or reports an error status.
    """
    lua_script = create_lua_script(plugins)
    # Lua source travels as a query-string parameter, so it must be URL-quoted.
    lua = urllib.parse.quote_plus(lua_script)
    page_url = f'{SPLASH_URL}/execute?url={url}&timeout={timeout}&lua_source={lua}'
    try:
        # docker_container() presumably manages the Splash container lifetime
        # around the request — confirm against its definition.
        with docker_container():
            logger.debug('[+] Sending request to Splash instance')
            res = requests.get(page_url)
    except requests.exceptions.ConnectionError:
        raise SplashError("Could not connect to Splash server {}".format(SPLASH_URL))
    logger.debug('[+] Response received')
    json_data = res.json()
    if res.status_code in ERROR_STATUS_CODES:
        raise SplashError(get_splash_error(json_data))
    softwares = json_data['softwares']
    scripts = json_data['scripts'].values()
    # Filter the HAR down to entries considered valid.
    har = get_valid_har(json_data['har'])
    js_error = get_evaljs_error(json_data)
    if js_error:
        logger.debug('[+] WARNING: failed to eval JS matchers: %(n)s', {'n': js_error})
    else:
        logger.debug('[+] Detected %(n)d softwares from the DOM', {'n': len(softwares)})
    logger.debug('[+] Detected %(n)d scripts from the DOM', {'n': len(scripts)})
    logger.debug('[+] Final HAR has %(n)d valid entries', {'n': len(har)})
    return {'har': har, 'scripts': scripts, 'softwares': softwares}
def parse(self, path):
    """Extract a dictionary of values from the XML file at *path*.

    Returns a ``(values, template)`` tuple, or ``None`` when no template
    could be resolved for the file.
    """
    # Load the template that will be used for parsing the values.
    expath, template, root = self._load_template(path)
    if expath is None:
        return None
    return (template.parse(root), template)
def _search(self, search_terms, begins_with=None):
    """Yield the ids of Archives stored in the Dynamo table.

    :param search_terms: tags that every returned archive must contain.
    :param begins_with: optional prefix filter on the ``_id`` attribute.
    :returns: generator of ``_id`` strings, paging through the full scan.
    """
    kwargs = dict(
        ProjectionExpression='#id',
        ExpressionAttributeNames={"#id": "_id"})
    if search_terms:
        # AND together one contains() condition per tag.
        kwargs['FilterExpression'] = reduce(
            lambda x, y: x & y,
            [Attr('tags').contains(arg) for arg in search_terms])
    if begins_with:
        # Use Attr, not Key: boto3 Key conditions are meant for
        # KeyConditionExpression; FilterExpression conditions should be
        # built from Attr.
        prefix_condition = Attr('_id').begins_with(begins_with)
        if 'FilterExpression' in kwargs:
            kwargs['FilterExpression'] = kwargs['FilterExpression'] & prefix_condition
        else:
            kwargs['FilterExpression'] = prefix_condition
    # Page through scan results until DynamoDB stops returning a cursor.
    while True:
        res = self._table.scan(**kwargs)
        for item in res['Items']:
            yield item['_id']
        if 'LastEvaluatedKey' not in res:
            break
        kwargs['ExclusiveStartKey'] = res['LastEvaluatedKey']
def to_code_array(self):
    """Replace everything in code_array from the xls file."""
    self._xls2shape()
    for tab, sheet_name in enumerate(self.workbook.sheet_names()):
        sheet = self.workbook.sheet_by_name(sheet_name)
        # Import each aspect of the worksheet in turn.
        for importer in (self._xls2code, self._xls2attributes,
                         self._xls2row_heights, self._xls2col_widths):
            importer(sheet, tab)
def _extract_blocks(x, block_h, block_w):
    """Helper function for local 2d attention.

    Splits *x* into non-overlapping (block_h x block_w) blocks.

    Args:
        x: a [batch, height, width, depth] tensor
        block_h: An integer. block height
        block_w: An integer. block width
    Returns:
        a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor
    """
    _, height, width, depth = common_layers.shape_list(x)
    assert height % block_h == 0
    assert width % block_w == 0
    blocked = tf.reshape(
        x, [-1, height // block_h, block_h, width // block_w, block_w, depth])
    # Move the two block-grid axes in front of the intra-block axes.
    return tf.transpose(blocked, [0, 1, 3, 2, 4, 5])
def package_meta():
    """Read __init__.py for global package metadata.

    Does this without importing the package, so it is safe to call from
    setup.py before dependencies are installed.

    :returns: dict with 'version', 'license' and 'url' keys.
    """
    _version_re = re.compile(r'__version__\s+=\s+(.*)')
    _url_re = re.compile(r'__url__\s+=\s+(.*)')
    _license_re = re.compile(r'__license__\s+=\s+(.*)')
    with open('lambda_uploader/__init__.py', 'rb') as ffinit:
        # Decode once instead of re-decoding for every regex search.
        initcontent = ffinit.read().decode('utf-8')
    version = str(ast.literal_eval(_version_re.search(initcontent).group(1)))
    url = str(ast.literal_eval(_url_re.search(initcontent).group(1)))
    license_ = str(ast.literal_eval(_license_re.search(initcontent).group(1)))
    return {'version': version, 'license': license_, 'url': url}
def rotate_bitmaps_to_roots(bitmaps, roots):
    """Circularly shift relative bitmaps to absolute pitch classes.

    See :func:`rotate_bitmap_to_root` for more information.

    Parameters
    ----------
    bitmaps : np.ndarray, shape=(N, 12)
        Bitmaps of active notes, relative to the given roots.
    roots : np.ndarray, shape=(N,)
        Absolute pitch class numbers.

    Returns
    -------
    np.ndarray, shape=(N, 12)
        Absolute bitmaps of active pitch classes.
    """
    return np.asarray([rotate_bitmap_to_root(bitmap, chord_root)
                       for bitmap, chord_root in zip(bitmaps, roots)])
def cli(env, volume_id, capacity, tier, upgrade):
    """Order snapshot space for a file storage volume."""
    file_manager = SoftLayer.FileStorageManager(env.client)
    if tier is not None:
        tier = float(tier)
    try:
        order = file_manager.order_snapshot_space(
            volume_id, capacity=capacity, tier=tier, upgrade=upgrade)
    except ValueError as ex:
        # Chain the original exception so the root cause is preserved.
        raise exceptions.ArgumentError(str(ex)) from ex
    # Membership tests directly on the dict — no need for .keys().
    if 'placedOrder' in order:
        click.echo("Order #{0} placed successfully!".format(order['placedOrder']['id']))
        for item in order['placedOrder']['items']:
            click.echo(" > %s" % item['description'])
        if 'status' in order['placedOrder']:
            click.echo(" > Order status: %s" % order['placedOrder']['status'])
    else:
        click.echo("Order could not be placed! Please verify your options "
                   "and try again.")
def receive(self, path, diffTo, diffFrom):
    """Receive a btrfs diff."""
    # Resolve the two endpoint identifiers into a concrete diff object,
    # then open whatever stream the butter store produces for it.
    diff = self.toObj.diff(diffTo, diffFrom)
    self._open(self.butterStore.receive(diff, [path, ]))
def scale(self, width: int, height: int) -> None:
    """Scale this Image to the new width and height, in place.

    Args:
        width (int): The new width of the Image after scaling.
        height (int): The new height of the Image after scaling.
    """
    lib.TCOD_image_scale(self.image_c, width, height)
    # Keep the Python-side dimensions in sync with the C image.
    self.width = width
    self.height = height
def p_print_list_at(p):
    """print_at : AT expr COMMA expr"""
    # NOTE: the docstring above is a PLY grammar production, not prose —
    # it must stay exactly as written for the parser generator to work.
    # Both coordinates are cast to unsigned byte before building the node.
    p[0] = make_sentence('PRINT_AT',
                         make_typecast(TYPE.ubyte, p[2], p.lineno(1)),
                         make_typecast(TYPE.ubyte, p[4], p.lineno(3)))
def _make_temp_directory(prefix):
    """Generate a temporary directory that would not live beyond the
    lifetime of unity_server.

    Caller is expected to clean up the temp directory as soon as it is no
    longer needed, but it will also be cleaned as unity_server restarts.
    """
    path = _make_temp_filename(prefix=str(prefix))
    _os.makedirs(path)
    return path
def fetchUserJobs(self, jobs):
    """Takes a user input array of jobs, verifies that they are in the
    jobStore and returns the array of jobsToReport.

    :param list jobs: A list of jobs to be verified.
    :returns: A list of jobs which are verified to be in the jobStore.
    """
    verified = []
    for job_id in jobs:
        try:
            verified.append(self.jobStore.load(job_id))
        except JobException:
            print('The job %s could not be found.' % job_id, file=sys.stderr)
            raise
    return verified
def _validate_platforms_in_image(self, image):
    """Ensure that the image provides all platforms expected for the build.

    :param image: base image reference to validate.
    :raises RuntimeError: if the manifest list cannot be fetched or does not
        cover every expected architecture.
    """
    expected_platforms = get_platforms(self.workflow)
    if not expected_platforms:
        self.log.info('Skipping validation of available platforms '
                      'because expected platforms are unknown')
        return
    if len(expected_platforms) == 1:
        self.log.info('Skipping validation of available platforms for base image '
                      'because this is a single platform build')
        return
    if not image.registry:
        self.log.info('Cannot validate available platforms for base image '
                      'because base image registry is not defined')
        return
    try:
        platform_to_arch = get_platform_to_goarch_mapping(self.workflow)
    except KeyError:
        self.log.info('Cannot validate available platforms for base image '
                      'because platform descriptors are not defined')
        return
    manifest_list = self._get_manifest_list(image)
    if not manifest_list:
        raise RuntimeError('Unable to fetch manifest list for base image')
    all_manifests = manifest_list.json()['manifests']
    manifest_list_arches = set(
        manifest['platform']['architecture'] for manifest in all_manifests)
    expected_arches = set(
        platform_to_arch[platform] for platform in expected_platforms)
    self.log.info('Manifest list arches: %s, expected arches: %s',
                  manifest_list_arches, expected_arches)
    # Raise explicitly instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently skip this validation.
    missing_arches = expected_arches - manifest_list_arches
    if missing_arches:
        raise RuntimeError('Missing arches in manifest list for base image: {}'
                           .format(', '.join(sorted(missing_arches))))
    self.log.info('Base image is a manifest list for all required platforms')
def save(self):
    """Saves pypirc file with new configuration information."""
    # Python 2 dict API (iteritems) — this module targets Python 2.
    for server, conf in self.servers.iteritems():
        # NOTE(review): _add_index_server() is called once per server with no
        # arguments — presumably it refreshes the index-servers entry; confirm
        # whether calling it inside the loop is intentional.
        self._add_index_server()
        for conf_k, conf_v in conf.iteritems():
            if not self.conf.has_section(server):
                self.conf.add_section(server)
            self.conf.set(server, conf_k, conf_v)
    with open(self.rc_file, 'wb') as configfile:
        self.conf.write(configfile)
    # Re-read so the in-memory parser reflects exactly what was written.
    self.conf.read(self.rc_file)
def search(self, index=None, doc_type=None, body=None, **query_params):
    """Make a search query on the elastic search.

    `<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_

    :param index: the index name to query
    :param doc_type: the doc type to search in
    :param body: the query
    :param query_params: params
    :arg _source: True or false to return the _source field or not, or a
        list of fields to return
    :arg _source_exclude: A list of fields to exclude from the returned
        _source field
    :arg _source_include: A list of fields to extract and return from the
        _source field
    :arg allow_no_indices: Whether to ignore if a wildcard indices
        expression resolves into no concrete indices. (This includes `_all`
        string or when no indices have been specified)
    :arg analyze_wildcard: Specify whether wildcard and prefix queries
        should be analyzed (default: false)
    :arg analyzer: The analyzer to use for the query string
    :arg default_operator: The default operator for query string query (AND
        or OR), default 'OR', valid choices are: 'AND', 'OR'
    :arg df: The field to use as default where no field prefix is given in
        the query string
    :arg expand_wildcards: Whether to expand wildcard expression to concrete
        indices that are open, closed or both., default 'open', valid
        choices are: 'open', 'closed', 'none', 'all'
    :arg explain: Specify whether to return detailed information about score
        computation as part of a hit
    :arg fielddata_fields: A comma-separated list of fields to return as the
        field data representation of a field for each hit
    :arg fields: A comma-separated list of fields to return as part of a hit
    :arg from_: Starting offset (default: 0)
    :arg ignore_unavailable: Whether specified concrete indices should be
        ignored when unavailable (missing or closed)
    :arg lenient: Specify whether format-based query failures (such as
        providing text to a numeric field) should be ignored
    :arg lowercase_expanded_terms: Specify whether query terms should be
        lowercased
    :arg preference: Specify the node or shard the operation should be
        performed on (default: random)
    :arg q: Query in the Lucene query string syntax
    :arg request_cache: Specify if request cache should be used for this
        request or not, defaults to index level setting
    :arg routing: A comma-separated list of specific routing values
    :arg scroll: Specify how long a consistent view of the index should be
        maintained for scrolled search
    :arg search_type: Search operation type, valid choices are:
        'query_then_fetch', 'dfs_query_then_fetch'
    :arg size: Number of hits to return (default: 10)
    :arg sort: A comma-separated list of <field>:<direction> pairs
    :arg stats: Specific 'tag' of the request for logging and statistical
        purposes
    :arg suggest_field: Specify which field to use for suggestions
    :arg suggest_mode: Specify suggest mode, default 'missing', valid
        choices are: 'missing', 'popular', 'always'
    :arg suggest_size: How many suggestions to return in response
    :arg suggest_text: The source text for which the suggestions should be
        returned
    :arg terminate_after: The maximum number of documents to collect for
        each shard, upon reaching which the query execution will terminate
        early.
    :arg timeout: Explicit operation timeout
    :arg track_scores: Whether to calculate and return scores even if they
        are not used for sorting
    :arg version: Specify whether to return document version as part of a
        hit
    """
    path = self._es_parser.make_path(index, doc_type, EsMethods.SEARCH)
    # NOTE(review): looks like a Twisted inlineCallbacks-style coroutine —
    # the request is awaited with `yield` and the result is returned via
    # returnValue(); confirm the decorator on this method.
    result = yield self._perform_request(HttpMethod.POST, path, body=body, params=query_params)
    returnValue(result)
def response_hook(self, r, **kwargs):
    """The actual hook handler.

    Retries with NTLM auth on 401 (server) or 407 (proxy) challenges;
    otherwise returns the response unchanged.
    """
    status = r.status_code
    if status == 401:
        # Handle server auth.
        challenge = r.headers.get('www-authenticate', '').lower()
        auth_type = _auth_type_from_header(challenge)
        if auth_type is not None:
            return self.retry_using_http_NTLM_auth(
                'www-authenticate', 'Authorization', r, auth_type, kwargs)
    elif status == 407:
        # If we didn't have server auth, do proxy auth.
        challenge = r.headers.get('proxy-authenticate', '').lower()
        auth_type = _auth_type_from_header(challenge)
        if auth_type is not None:
            return self.retry_using_http_NTLM_auth(
                'proxy-authenticate', 'Proxy-authorization', r, auth_type, kwargs)
    return r
def _get_cached_stats(self, app_stats):
    """Build metrics from cached check statistics.

    Processes, when present in *app_stats*:
      * cached_host_check_stats
      * cached_service_check_stats

    :param app_stats: dict of raw application statistics.
    :returns: dict mapping metric names (e.g. 'hosts.cached.01') to values.
    """
    stats = {}
    for app_key in ("cached_host_check_stats", "cached_service_check_stats"):
        # Membership test directly on the dict — no need for .keys().
        if app_key not in app_stats:
            continue
        x01, x05, x15 = self._convert_tripplet(app_stats[app_key])
        # 'host' or 'service', taken from the key name.
        kind = app_key.split("_")[1]
        stats["%ss.cached.01" % kind] = x01
        stats["%ss.cached.05" % kind] = x05
        stats["%ss.cached.15" % kind] = x15
    return stats
def by_id(cls, user_id, db_session=None):
    """Fetch a user by user id.

    :param user_id: primary key of the user row
    :param db_session: optional session; falls back to the default session
    :return: the matching model instance, or None
    """
    session = get_db_session(db_session)
    return (
        session.query(cls.model)
        .filter(cls.model.id == user_id)
        .options(sa.orm.eagerload("groups"))
        .first()
    )
def get_n_header(f, header_char='"'):
    '''Get the number of header rows in a Little Leonardo data file.

    Args
    ----
    f : file stream
        File handle for the file from which header rows will be read.
        The stream is consumed up to (and including) the first
        non-header line.
    header_char : str
        Character(s) at the beginning of each header line.

    Returns
    -------
    n_header : int
        Number of header rows in the Little Leonardo data file.
    '''
    n_header = 0
    # readline() returns '' at EOF, which also terminates the loop.
    for line in iter(f.readline, ''):
        if not line.startswith(header_char):
            break
        n_header += 1
    return n_header
def get_config(self, budget):
    """Function to sample a new configuration.

    This function is called inside BOHB to query a new configuration.

    Parameters
    ----------
    budget : float
        the budget for which this configuration is scheduled

    Returns
    -------
    config
        a valid configuration dict with parameters and budget
    """
    logger.debug('start sampling a new configuration.')
    sample = None
    info_dict = {}
    # If no model is available, sample from prior
    # also mix in a fraction of random configs
    if len(self.kde_models.keys()) == 0 or np.random.rand() < self.random_fraction:
        sample = self.configspace.sample_configuration()
        info_dict['model_based_pick'] = False
    # Otherwise fall back to the model fitted on the largest budget.
    if sample is None:
        sample, info_dict = self.sample_from_largest_budget(info_dict)
    # Deactivate hyperparameters that are inactive under the sampled values
    # so the returned dict is a consistent configuration.
    sample = ConfigSpace.util.deactivate_inactive_hyperparameters(configuration_space=self.configspace, configuration=sample.get_dictionary()).get_dictionary()
    logger.debug('done sampling a new configuration.')
    # Attach the scheduled budget to the returned configuration.
    sample['TRIAL_BUDGET'] = budget
    return sample
def validate_term(term, so=None, method="verify"):
    """Validate an SO term against so.obo.

    With a "resolve" method, unknown terms are retried after stripping one
    underscore-separated component (the last one when the method mentions
    "prefix", otherwise the first).  Without "resolve", an unknown term is
    a fatal error.
    """
    if so is None:
        so = load_GODag()
    original_term = term
    if term not in so.valid_names:
        if "resolve" in method:
            if "_" in term:
                parts = deque(term.split("_"))
                if "prefix" in method:
                    parts.pop()
                else:
                    parts.popleft()
                term = validate_term("_".join(parts).strip(), so=so, method=method)
            if term is None:
                return None
        else:
            logging.error("Term `{0}` does not exist".format(term))
            sys.exit(1)
    if original_term != term:
        logging.debug("Resolved term `{0}` to `{1}`".format(original_term, term))
    return term
def expand_groups(config_ids, maps):
    """Iterates over a list of container configuration ids, expanding groups of container configurations.

    :param config_ids: List of container configuration ids.
    :type config_ids: collections.Iterable[dockermap.map.input.InputConfigId | dockermap.map.input.MapConfigId]
    :param maps: Extended container maps.
    :type maps: dict[unicode | str, dockermap.map.config.main.ContainerMap]
    :return: Expanded MapConfigId tuples.
    :rtype: collections.Iterable[dockermap.map.input.InputConfigId]
    """
    for config_id in config_ids:
        # '__all__' as map name expands to every known map.
        if config_id.map_name == '__all__':
            c_maps = six.iteritems(maps)
        else:
            # Single-element tuple of (name, map) — note the trailing comma.
            c_maps = (config_id.map_name, maps[config_id.map_name]),
        # Normalize instance names to a tuple regardless of input type.
        if isinstance(config_id, InputConfigId):
            instance_name = config_id.instance_names
        elif isinstance(config_id, MapConfigId):
            instance_name = (config_id.instance_name, )
        else:
            raise ValueError("Expected InputConfigId or MapConfigId tuple; found {0}."
                             "".format(type(config_id).__name__))
        for map_name, c_map in c_maps:
            # '__all__' as config name (containers only) expands to every
            # container on the map.
            if config_id.config_name == '__all__' and config_id.config_type == ItemType.CONTAINER:
                for config_name in six.iterkeys(c_map.containers):
                    yield MapConfigId(config_id.config_type, map_name, config_name, instance_name)
            else:
                # Otherwise the config name may refer to a group.
                group = c_map.groups.get(config_id.config_name)
                if group is not None:
                    for group_item in group:
                        if isinstance(group_item, MapConfigId):
                            yield group_item
                        elif isinstance(group_item, six.string_types):
                            # Group entries may be 'config.instance' strings;
                            # an explicit instance overrides instance_name.
                            config_name, __, instance = group_item.partition('.')
                            yield MapConfigId(config_id.config_type, map_name, config_name,
                                              (instance, ) if instance else instance_name)
                        else:
                            raise ValueError("Invalid group item. Must be string or MapConfigId tuple; "
                                             "found {0}.".format(type(group_item).__name__))
                else:
                    # Not a group: pass the id through unchanged.
                    yield MapConfigId(config_id.config_type, map_name, config_id.config_name, instance_name)
def _merge_user_attrs(self, attrs_backend, attrs_out, backend_name):
    """Merge attributes from one backend search into the output dict.

    Only attributes known to the backend's mapping are copied, and values
    already present in *attrs_out* are never overwritten.
    """
    backend_map = self.attributes.backend_attributes[backend_name]
    for attr in attrs_backend:
        if attr not in backend_map:
            continue
        attrs_out.setdefault(backend_map[attr], attrs_backend[attr])
def _update_length(self):
    """Update the length field of the struct."""
    # 4 header bytes plus the packed payload, rounded up to the next
    # multiple of 8 (OpenFlow actions are 8-byte aligned).
    action_length = 4 + len(self.field.pack())
    self.length = action_length + (-action_length % 8)
def _split(self, string):
    """Iterate over the N-grams of a string (no padding).

    >>> from ngram import NGram
    >>> n = NGram()
    >>> list(n._split("hamegg"))
    ['ham', 'ame', 'meg', 'egg']
    """
    size = self.N
    yield from (string[start:start + size]
                for start in range(len(string) - size + 1))
def parse_names(cls, expression):
    """Return the set of identifiers used in the expression.

    Unparseable expressions yield an empty set rather than raising.
    """
    try:
        tree = ast.parse(expression, "ast")
        return {node.id for node in ast.walk(tree)
                if isinstance(node, ast.Name)}
    except Exception:
        return set()
def rdist(x, y):
    """Reduced Euclidean distance.

    Parameters
    ----------
    x : array of shape (embedding_dim,)
    y : array of shape (embedding_dim,)

    Returns
    -------
    The squared euclidean distance between x and y
    """
    total = 0.0
    for i in range(x.shape[0]):
        diff = x[i] - y[i]
        total += diff * diff
    return total
def __set_formula(self, formula):
    """Sets a formula in this cell.

    Any cell value can be set using this method.  Actual formulas must
    start with an equal sign.
    """
    # setFormulaArray expects a 2-D array, so wrap the single cleaned
    # formula in a 1x1 nested tuple.
    array = ((self._clean_formula(formula), ), )
    return self._get_target().setFormulaArray(array)
def _symmetrize_correlograms(correlograms):
    """Return the symmetrized version of the CCG arrays.

    Input shape is (n_clusters, n_clusters, n_bins); output shape is
    (n_clusters, n_clusters, 2*n_bins - 1), covering negative lags too.
    """
    n_clusters, _, n_bins = correlograms.shape
    assert n_clusters == _
    # We symmetrize c[i, j, 0].
    # This is necessary because the algorithm in correlograms()
    # is sensitive to the order of identical spikes.
    # NOTE: this mutates the input array in place for bin 0.
    correlograms[..., 0] = np.maximum(correlograms[..., 0], correlograms[..., 0].T)
    # Mirror the remaining bins: reverse the lag axis and swap the two
    # cluster axes so c[i, j, -t] == c[j, i, t].
    sym = correlograms[..., 1:][..., ::-1]
    sym = np.transpose(sym, (1, 0, 2))
    # Negative lags first, then the original non-negative lags.
    return np.dstack((sym, correlograms))
def multihead_attention_vars(mesh, heads, io_channels, kv_channels, master_dtype, slice_dtype, activation_dtype):
    """Deprecated version of multihead_attention_params with combine=True."""
    # Thin compatibility wrapper: bundles the three dtypes into a
    # mtf.VariableDType before delegating.
    return multihead_attention_params(mesh, heads, io_channels, kv_channels, mtf.VariableDType(master_dtype, slice_dtype, activation_dtype), combine=True)
def remove_connection(provider_id, provider_user_id):
    """Remove a specific connection for the authenticated user to the
    specified provider.

    Flashes an info or error message and redirects back to the referrer
    (or the post-login page).
    """
    provider = get_provider_or_404(provider_id)
    ctx = dict(provider=provider.name, user=current_user,
               provider_user_id=provider_user_id)
    deleted = _datastore.delete_connection(
        user_id=current_user.get_id(),
        provider_id=provider_id,
        provider_user_id=provider_user_id)
    if deleted:
        after_this_request(_commit)
        msg = ('Connection to %(provider)s removed' % ctx, 'info')
        connection_removed.send(
            current_app._get_current_object(),
            user=current_user._get_current_object(),
            provider_id=provider_id)
    else:
        # Fixed typo in the user-facing message ("Unabled" -> "Unable").
        msg = ('Unable to remove connection to %(provider)s' % ctx, 'error')
    do_flash(*msg)
    return redirect(request.referrer or get_post_login_redirect())
def download_and_calibrate_parallel(list_of_ids, n=None):
    """Download and calibrate in parallel.

    Parameters
    ----------
    list_of_ids : list, optional
        container with img_ids to process
    n : int
        Number of cores for the parallel processing. Default: n_cores_system//2
    """
    setup_cluster(n_cores=n)
    c = Client()
    lbview = c.load_balanced_view()
    # NOTE(review): map_async returns immediately and its AsyncResult is
    # discarded, yet the cluster is stopped on the next line — confirm the
    # submitted work actually completes before shutdown.
    lbview.map_async(download_and_calibrate, list_of_ids)
    subprocess.Popen(["ipcluster", "stop", "--quiet"])
def exponential(x, y, xscale, yscale):
    """Two-dimensional oriented exponential decay pattern."""
    # A zero scale collapses the pattern to zeros of the same shape as x.
    if xscale == 0.0 or yscale == 0.0:
        return x * 0.0
    with float_error_ignore():
        scaled_x = np.divide(x, xscale)
        scaled_y = np.divide(y, yscale)
        radius = np.sqrt(scaled_x * scaled_x + scaled_y * scaled_y)
        return np.exp(-radius)
def calculate_signature(key, data, timestamp=None):
    """Calculate the HMAC-SHA256 signature for the given request data.

    :param key: shared secret used as the HMAC key.
    :param data: mapping of request parameters; items are concatenated as
        ``key value`` pairs in sorted key order.
    :param timestamp: optional integer timestamp; defaults to the current
        time.
    :returns: hex-encoded signature string.
    """
    # Create a timestamp if one was not given
    if timestamp is None:
        timestamp = int(time.time())
    # Construct the message from the timestamp and the data in the request
    message = str(timestamp) + ''.join("%s%s" % (k, v) for k, v in sorted(data.items()))
    # Calculate the signature (HMAC SHA256) according to RFC 2104.
    # hmac requires bytes on Python 3, so encode key and message explicitly
    # (hmac.HMAC(str(key), message, ...) raises TypeError there).
    signature = hmac.new(str(key).encode('utf-8'),
                         message.encode('utf-8'),
                         hashlib.sha256).hexdigest()
    return signature
def to_html(sample, stats_object):
    """Generate a HTML report from summary statistics and a given sample.

    Parameters
    ----------
    sample : DataFrame
        the sample you want to print
    stats_object : dict
        Summary statistics. Should be generated with an appropriate describe() function

    Returns
    -------
    str
        containing profile report in HTML format

    Notes
    -----
    * This function has to be refactored since it's huge and it contains inner functions
    """
    n_obs = stats_object['table']['n']
    value_formatters = formatters.value_formatters
    row_formatters = formatters.row_formatters
    # Validate inputs before doing any rendering work.
    if not isinstance(sample, pd.DataFrame):
        raise TypeError("sample must be of type pandas.DataFrame")
    if not isinstance(stats_object, dict):
        raise TypeError("stats_object must be of type dict. Did you generate this using the pandas_profiling.describe() function?")
    if not set({'table', 'variables', 'freq', 'correlations'}).issubset(set(stats_object.keys())):
        raise TypeError("stats_object badly formatted. Did you generate this using the pandas_profiling.describe() function?")

    def fmt(value, name):
        # Format a single value: per-column formatter first, then the
        # default float formatter, then plain string conversion.
        if pd.isnull(value):
            return ""
        if name in value_formatters:
            return value_formatters[name](value)
        elif isinstance(value, float):
            return value_formatters[formatters.DEFAULT_FLOAT_FORMATTER](value)
        else:
            try:
                return unicode(value)
            # Python 2
            except NameError:
                return str(value)
            # Python 3

    def _format_row(freq, label, max_freq, row_template, n, extra_class=''):
        # Render one bar of a frequency table; bar width is proportional
        # to freq/max_freq, and the count label goes inside the bar only
        # when the bar is wide enough to hold it.
        if max_freq != 0:
            width = int(freq / max_freq * 99) + 1
        else:
            width = 1
        if width > 20:
            label_in_bar = freq
            label_after_bar = ""
        else:
            label_in_bar = "&nbsp;"
            label_after_bar = freq
        return row_template.render(label=label, width=width, count=freq, percentage='{:2.1f}'.format(freq / n * 100), extra_class=extra_class, label_in_bar=label_in_bar, label_after_bar=label_after_bar)

    def freq_table(freqtable, n, table_template, row_template, max_number_to_print, nb_col=6):
        # Render the top max_number_to_print entries, then aggregate the
        # rest into "Other values" and "(Missing)" rows.
        freq_rows_html = u''
        if max_number_to_print > n:
            max_number_to_print = n
        if max_number_to_print < len(freqtable):
            freq_other = sum(freqtable.iloc[max_number_to_print:])
            min_freq = freqtable.values[max_number_to_print]
        else:
            freq_other = 0
            min_freq = 0
        freq_missing = n - sum(freqtable)
        max_freq = max(freqtable.values[0], freq_other, freq_missing)
        # TODO: Correctly sort missing and other
        for label, freq in six.iteritems(freqtable.iloc[0:max_number_to_print]):
            freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
        if freq_other > min_freq:
            freq_rows_html += _format_row(freq_other, "Other values (%s)" % (freqtable.count() - max_number_to_print), max_freq, row_template, n, extra_class='other')
        if freq_missing > min_freq:
            freq_rows_html += _format_row(freq_missing, "(Missing)", max_freq, row_template, n, extra_class='missing')
        # NOTE(review): `idx` here is the enclosing loop variable (the
        # current column name), captured by closure — confirm intended.
        return table_template.render(rows=freq_rows_html, varid=hash(idx), nb_col=nb_col)

    def extreme_obs_table(freqtable, table_template, row_template, number_to_print, n, ascending=True):
        # If it's mixed between base types (str, int) convert to str. Pure "mixed" types are filtered during type discovery
        if "mixed" in freqtable.index.inferred_type:
            freqtable.index = freqtable.index.astype(str)
        sorted_freqTable = freqtable.sort_index()
        if ascending:
            obs_to_print = sorted_freqTable.iloc[:number_to_print]
        else:
            obs_to_print = sorted_freqTable.iloc[-number_to_print:]
        freq_rows_html = ''
        max_freq = max(obs_to_print.values)
        for label, freq in six.iteritems(obs_to_print):
            freq_rows_html += _format_row(freq, label, max_freq, row_template, n)
        return table_template.render(rows=freq_rows_html)

    # Variables
    rows_html = u""
    messages = []
    render_htmls = {}
    for idx, row in stats_object['variables'].iterrows():
        formatted_values = {'varname': idx, 'varid': hash(idx)}
        row_classes = {}
        # Format every statistic for this variable and collect alert
        # messages triggered by the row formatters.
        for col, value in six.iteritems(row):
            formatted_values[col] = fmt(value, col)
        for col in set(row.index) & six.viewkeys(row_formatters):
            row_classes[col] = row_formatters[col](row[col])
            if row_classes[col] == "alert" and col in templates.messages:
                messages.append(templates.messages[col].format(formatted_values, varname=idx))
        # Type-specific rendering below.
        if row['type'] in {'CAT', 'BOOL'}:
            formatted_values['minifreqtable'] = freq_table(stats_object['freq'][idx], n_obs, templates.template('mini_freq_table'), templates.template('mini_freq_table_row'), 3, templates.mini_freq_table_nb_col[row['type']])
            if row['distinct_count'] > 50:
                messages.append(templates.messages['HIGH_CARDINALITY'].format(formatted_values, varname=idx))
                row_classes['distinct_count'] = "alert"
            else:
                row_classes['distinct_count'] = ""
        if row['type'] == 'UNIQUE':
            obs = stats_object['freq'][idx].index
            formatted_values['firstn'] = pd.DataFrame(obs[0:3], columns=["First 3 values"]).to_html(classes="example_values", index=False)
            formatted_values['lastn'] = pd.DataFrame(obs[-3:], columns=["Last 3 values"]).to_html(classes="example_values", index=False)
        if row['type'] == 'UNSUPPORTED':
            formatted_values['varname'] = idx
            messages.append(templates.messages[row['type']].format(formatted_values))
        elif row['type'] in {'CORR', 'CONST', 'RECODED'}:
            formatted_values['varname'] = idx
            messages.append(templates.messages[row['type']].format(formatted_values))
        else:
            formatted_values['freqtable'] = freq_table(stats_object['freq'][idx], n_obs, templates.template('freq_table'), templates.template('freq_table_row'), 10)
            formatted_values['firstn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending=True)
            formatted_values['lastn_expanded'] = extreme_obs_table(stats_object['freq'][idx], templates.template('freq_table'), templates.template('freq_table_row'), 5, n_obs, ascending=False)
        rows_html += templates.row_templates_dict[row['type']].render(values=formatted_values, row_classes=row_classes)
    render_htmls['rows_html'] = rows_html
    # Overview
    formatted_values = {k: fmt(v, k) for k, v in six.iteritems(stats_object['table'])}
    row_classes = {}
    for col in six.viewkeys(stats_object['table']) & six.viewkeys(row_formatters):
        row_classes[col] = row_formatters[col](stats_object['table'][col])
        if row_classes[col] == "alert" and col in templates.messages:
            # NOTE(review): `idx` is left over from the variables loop here —
            # confirm the message really should reference the last variable.
            messages.append(templates.messages[col].format(formatted_values, varname=idx))
    messages_html = u''
    for msg in messages:
        messages_html += templates.message_row.format(message=msg)
    overview_html = templates.template('overview').render(values=formatted_values, row_classes=row_classes, messages=messages_html)
    render_htmls['overview_html'] = overview_html
    # Add plot of matrix correlation if the dataframe is not empty
    if len(stats_object['correlations']['pearson']) > 0:
        pearson_matrix = plot.correlation_matrix(stats_object['correlations']['pearson'], 'Pearson')
        spearman_matrix = plot.correlation_matrix(stats_object['correlations']['spearman'], 'Spearman')
        correlations_html = templates.template('correlations').render(values={'pearson_matrix': pearson_matrix, 'spearman_matrix': spearman_matrix})
        render_htmls['correlations_html'] = correlations_html
    # Add sample
    sample_html = templates.template('sample').render(sample_table_html=sample.to_html(classes="sample"))
    render_htmls['sample_html'] = sample_html
    # TODO: should be done in the template
    return templates.template('base').render(render_htmls)
def msvs_parse_version(s):
    """Split a Visual Studio version string, which may in fact be something
    like '7.0Exp', into its version number (returned as a float) and the
    trailing "suite" portion.
    """
    match = version_re.match(s)
    number, suite = match.groups()
    return (float(number), suite)
def getTotalPrice(self):
    """Compute the total price: net price plus VAT, quantized to cents."""
    net = Decimal(self.getPrice() or '0.00')
    vat_rate = Decimal(self.getVAT())
    # a zero/empty VAT rate contributes nothing (mirrors `vat and ... or 0`)
    rate = vat_rate / 100 if vat_rate else 0
    total = net + net * rate
    return total.quantize(Decimal('0.00'))
def to_datetime(self, column):
    """Convert ``column`` in place to pandas datetimes.

    Numeric columns are interpreted as epoch seconds; any other dtype is
    handed to ``pd.to_datetime`` with ``utc=True``.

    :param column: name of the column to convert; missing columns are ignored
    """
    if column not in self:
        return
    if self[column].dtype in NUMPY_NUMERICAL:
        self[column] = pd.to_datetime(self[column], unit='s')
    else:
        self[column] = pd.to_datetime(self[column], utc=True)
def parse(cls, args):
    """Parse command line arguments to construct a dictionary of command
    parameters that can be used to create a command.

    Args:
        `args`: sequence of arguments

    Returns:
        Dictionary that can be used in create method, or None when option
        parsing requested an exit (e.g. ``--help``).

    Raises:
        ParseError: when the arguments are not correct
    """
    try:
        (options, args) = cls.optparser.parse_args(args)
        if options.inline is None and options.script_location is None:
            raise ParseError("One of script or it's location"
                             " must be specified",
                             cls.optparser.format_help())
    except OptionParsingError as e:
        raise ParseError(e.msg, cls.optparser.format_help())
    except OptionParsingExit as e:
        return None

    if options.script_location is not None:
        if options.inline is not None:
            raise ParseError("Both script and script_location cannot be specified",
                             cls.optparser.format_help())
        if ((options.script_location.find("s3://") != 0) and
                (options.script_location.find("s3n://") != 0)):
            # script location is a local file: inline its contents
            try:
                # use a context manager so the handle is always closed
                # (previously the file object returned by open() leaked)
                with open(options.script_location) as script_file:
                    s = script_file.read()
            except IOError as e:
                raise ParseError("Unable to open script location: %s" % str(e),
                                 cls.optparser.format_help())
            options.script_location = None
            options.inline = s
        if (args is not None) and (len(args) > 0):
            # extra positional args become quoted script parameters,
            # but only for scripts that remain in S3
            if options.inline is not None:
                raise ParseError("Extra arguments can only be "
                                 "supplied with a script_location in S3 right now",
                                 cls.optparser.format_help())
            setattr(options, 'parameters',
                    " ".join([pipes.quote(a) for a in args]))
    else:
        if (args is not None) and (len(args) > 0):
            raise ParseError("Extra arguments can only be supplied with a script_location",
                             cls.optparser.format_help())

    v = vars(options)
    v["command_type"] = "ShellCommand"
    return v
def __generate_tree(self, top, src, resources, models, ctrls, views, utils):
    """Create the project's directories and Python packages.

    Returns the OR of the individual creation results (truthy when any
    directory or package had to be created).
    """
    created = self.__mkdir(top)
    for package_dir in (src, models, ctrls, views, utils):
        created = self.__mkpkg(package_dir) or created
    created = self.__mkdir(resources) or created
    for parts in (("ui", "builder"), ("ui", "styles"), ("external",)):
        created = self.__mkdir(os.path.join(resources, *parts)) or created
    return created
def rotate(name, pattern=None, conf_file=default_conf, **kwargs):
    '''
    Set up pattern for logging.

    name : string
        alias for entryname
    pattern : string
        alias for log_file
    conf_file : string
        optional path to alternative configuration file
    kwargs : boolean|string|int
        optional additional flags and parameters

    .. note::
        ``name`` and ``pattern`` were kept for backwards compatibility reasons.
        ``name`` is an alias for the ``entryname`` argument, ``pattern`` is an alias
        for ``log_file``. These aliases will only be used if the ``entryname`` and
        ``log_file`` arguments are not passed.

        For a full list of arguments see ```logadm.show_args```.

    CLI Example:

    .. code-block:: bash

        salt '*' logadm.rotate myapplog pattern='/var/log/myapp/*.log' count=7
        salt '*' logadm.rotate myapplog log_file='/var/log/myapp/*.log' count=4 owner=myappd mode='0700'
    '''
    ## cleanup kwargs
    kwargs = salt.utils.args.clean_kwargs(**kwargs)

    ## inject name into kwargs
    # `name` only acts as the entryname alias when it does not look like a path
    if 'entryname' not in kwargs and name and not name.startswith('/'):
        kwargs['entryname'] = name

    ## inject pattern into kwargs
    if 'log_file' not in kwargs:
        if pattern and pattern.startswith('/'):
            kwargs['log_file'] = pattern
        # NOTE: for backwards compatibility check if name is a path
        elif name and name.startswith('/'):
            kwargs['log_file'] = name

    ## build command
    log.debug("logadm.rotate - kwargs: %s", kwargs)
    command = "logadm -f {}".format(conf_file)
    for arg, val in kwargs.items():
        if arg in option_toggles.values() and val:
            # boolean toggle options take no value on the command line
            command = "{} {}".format(command, _arg2opt(arg),)
        elif arg in option_flags.values():
            # flag options carry a (shell-quoted) value
            command = "{} {} {}".format(command, _arg2opt(arg), _quote_args(six.text_type(val)))
        elif arg != 'log_file':
            # log_file is handled separately below; anything else is unmapped
            log.warning("Unknown argument %s, don't know how to map this!", arg)
    if 'log_file' in kwargs:
        # NOTE: excerpt from ```man logadm```
        #  If no log file name is provided on a logadm command line, the entry
        #  name is assumed to be the same as the log file name. For example,
        #  the following two lines achieve the same thing, keeping two copies
        #  of rotated log files:
        #
        #      % logadm -C2 -w mylog /my/really/long/log/file/name
        #      % logadm -C2 -w /my/really/long/log/file/name
        if 'entryname' not in kwargs:
            # let the log file double as the entry name via -w
            command = "{} -w {}".format(command, _quote_args(kwargs['log_file']))
        else:
            command = "{} {}".format(command, _quote_args(kwargs['log_file']))

    log.debug("logadm.rotate - command: %s", command)
    result = __salt__['cmd.run_all'](command, python_shell=False)
    if result['retcode'] != 0:
        return dict(Error='Failed in adding log', Output=result['stderr'])
    return dict(Result='Success')
def wrap_line(line, maxline=79, result=None, count=count):
    """We have a line that is too long, so we're going to try to wrap it.

    :param line: list whose first element may be an indentation string,
        followed by token groups (as understood by ``delimiter_groups``)
    :param maxline: target maximum rendered line width
    :param result: list to append the wrapped output tokens to; a fresh
        list is used when omitted
    :param count: callable measuring the rendered width of a token group
    :return: ``result`` with the wrapped tokens appended
    """
    # Fix: the default used to be the shared mutable ``result=[]``, which
    # made successive calls silently accumulate into the same list.
    if result is None:
        result = []
    # Extract the indentation
    append = result.append
    extend = result.extend
    indentation = line[0]
    lenfirst = len(indentation)
    indent = lenfirst - len(indentation.lstrip())
    assert indent in (0, lenfirst)
    indentation = line.pop(0) if indent else ''

    # Get splittable/non-splittable groups
    dgroups = list(delimiter_groups(line))
    unsplittable = dgroups[::2]
    splittable = dgroups[1::2]

    # If the largest non-splittable group won't fit
    # on a line, try to add parentheses to the line.
    if max(count(x) for x in unsplittable) > maxline - indent:
        line = add_parens(line, maxline, indent)
        dgroups = list(delimiter_groups(line))
        unsplittable = dgroups[::2]
        splittable = dgroups[1::2]

    # Deal with the first (always unsplittable) group, and
    # then set up to deal with the remainder in pairs.
    first = unsplittable[0]
    append(indentation)
    extend(first)
    if not splittable:
        return result
    pos = indent + count(first)
    # continuation lines are indented one extra level (4 columns); the
    # string must stay in sync with the numeric `indent` bookkeeping
    indentation += '    '
    indent += 4
    if indent >= maxline / 2:
        maxline = maxline / 2 + indent

    for sg, nsg in zip(splittable, unsplittable[1:]):
        if sg:
            # If we already have stuff on the line and even
            # the very first item won't fit, start a new line
            if pos > indent and pos + len(sg[0]) > maxline:
                append('\n')
                append(indentation)
                pos = indent

            # Dump lines out of the splittable group
            # until the entire thing fits
            csg = count(sg)
            while pos + csg > maxline:
                ready, sg = split_group(sg, pos, maxline)
                if ready[-1].endswith(' '):
                    ready[-1] = ready[-1][:-1]
                extend(ready)
                append('\n')
                append(indentation)
                pos = indent
                csg = count(sg)

            # Dump the remainder of the splittable group
            if sg:
                extend(sg)
                pos += csg

        # Dump the unsplittable group, optionally
        # preceded by a linefeed.
        cnsg = count(nsg)
        if pos > indent and pos + cnsg > maxline:
            append('\n')
            append(indentation)
            pos = indent
        extend(nsg)
        pos += cnsg
    # Fix: return the result on every path (previously only the
    # no-splittable-groups early return did).
    return result
def pprint(sequence_file, annotation=None, annotation_file=None,
           block_length=10, blocks_per_line=6):
    """Pretty-print sequence(s) from a file."""
    annotations = []
    if annotation:
        # convert one-based inclusive (first, last) pairs to half-open ranges
        annotations.append([(first - 1, last) for first, last in annotation])
    options = dict(annotations=annotations,
                   annotation_file=annotation_file,
                   block_length=block_length,
                   blocks_per_line=blocks_per_line)
    try:
        # Peek at the first line to see if this looks like a FASTA file.
        line = next(sequence_file)
        if line.startswith('>'):
            _pprint_fasta(itertools.chain([line], sequence_file), **options)
        else:
            _pprint_line(line.strip(), **options)
    except StopIteration:
        # empty input: nothing to print
        pass
def contents(self):
    """Return the list of contained directory entries, loading them
    if not already loaded."""
    if not self.contents_read:
        # populate lazily, exactly once
        self.contents_read = True
        base = self.path
        for entry_name in os.listdir(self.source_path):
            src_path = os.path.join(self.source_path, entry_name)
            dst_path = os.path.join(base, entry_name)
            if os.path.isdir(src_path):
                adder = self.filesystem.add_real_directory
            else:
                adder = self.filesystem.add_real_file
            adder(src_path, self.read_only, target_path=dst_path)
    return self.byte_contents
def _canBeExpanded(self, headVerbRoot, headVerbWID, suitableNomAdvExpansions,
                   expansionVerbs, widToToken):
    '''Decide whether the context is clear/unambiguous enough to expand the
    verb chain:
      1) there is exactly one nom/adv candidate and exactly one matching
         Vinf candidate;
      2) the nom/adv word is not part of a larger phrase (checked with
         _isLikelyNotPhrase()).
    Returns the verb to add from ``expansionVerbs`` when both conditions
    hold, otherwise None.
    '''
    if len(suitableNomAdvExpansions) == 1 and expansionVerbs:
        # Keep only expansion verbs that match the single nom/adv candidate;
        # more than one match makes the context too ambiguous to expand.
        matching = [verb for verb in expansionVerbs
                    if verb[2] == suitableNomAdvExpansions[0][2]]
        if len(matching) == 1:
            # The nom/adv word must not be the head of a larger phrase.
            nomAdvWID = suitableNomAdvExpansions[0][0]
            if self._isLikelyNotPhrase(headVerbRoot, headVerbWID, nomAdvWID, widToToken):
                return matching[0]
    return None
def finish_async_rpc(self, address, rpc_id, response):
    """Finish a previous asynchronous RPC.

    Called by a peripheral tile that previously raised
    ``AsynchronousRPCResponse`` inside an RPC handler; ``response`` is
    delivered to the original caller as if the RPC had returned it
    immediately.  Must only be called from a coroutine inside the
    emulation loop handling background work on behalf of a tile.

    Args:
        address (int): The tile address the RPC was called on.
        rpc_id (int): The ID of the RPC that was called.
        response (bytes): The bytes that should be returned to
            the caller of the RPC.
    """
    pending = self._pending_rpcs.get(address)
    if pending is None:
        raise ArgumentError("No asynchronously RPC currently in progress on tile %d" % address)
    # pop removes the responder only when it exists
    responder = pending.pop(rpc_id, None)
    if responder is None:
        raise ArgumentError("RPC %04X is not running asynchronous on tile %d" % (rpc_id, address))
    responder.set_result(response)
    self._rpc_queue.task_done()
async def get_creds_display_coarse(self, filt: dict = None) -> str:
    """Return human-readable credentials from the wallet, coarsely filtered.

    With no filter, all credentials are returned.

    :param filt: indy-sdk filter for credentials; any of the optional keys
        schema_id, schema_issuer_did, schema_name, schema_version,
        issuer_did and cred_def_id may be present
    :return: credentials json list; each entry carries referent, attrs,
        schema_id, cred_def_id, rev_reg_id and cred_rev_id
    """
    LOGGER.debug('HolderProver.get_creds_display_coarse >>> filt: %s', filt)

    creds_json = await anoncreds.prover_get_credentials(
        self.wallet.handle,
        json.dumps(filt or {}))

    LOGGER.debug('HolderProver.get_creds_display_coarse <<< %s', creds_json)
    return creds_json
def rebin(a, *args):
    """Rebin (downsample) array ``a`` to the shape ``args`` by averaging
    equal-size blocks.

    See http://scipy-cookbook.readthedocs.io/items/Rebinning.html
    Integer division is used when computing the block factor so the
    reshape arguments are always integers.

    The previous implementation built a source string and ``eval``-ed it;
    this version performs the same reshape-and-mean chain directly.

    :param a: input ndarray
    :param args: target shape, one integer per axis; each must evenly
        divide the corresponding entry of ``a.shape``
    :return: ndarray of shape ``args`` containing block means
    """
    shape = a.shape
    ndim = len(shape)
    # number of original cells collapsed into each output cell, per axis
    factor = np.asarray(shape) // np.asarray(args)
    # interleave (target_size, factor) per axis: (n0, f0, n1, f1, ...)
    new_shape = []
    for i in range(ndim):
        new_shape.extend((args[i], factor[i]))
    result = a.reshape(*new_shape)
    # average out each factor axis in turn; axis index i+1 matches the old
    # chained .mean(1).mean(2)... behaviour because earlier reductions
    # shift the remaining factor axes left by one each time
    for i in range(ndim):
        result = result.mean(i + 1)
    return result
def _ProcessSources(self, sources, parser_factory):
    """Iterate through sources, yielding one action response per parsed
    (action, request) pair."""
    for src in sources:
        for action, request in self._ParseSourceType(src):
            yield self._RunClientAction(action, request, parser_factory,
                                        src.path_type)
def normalizeGlyphUnicode(value):
    """Normalizes glyph unicode.

    * **value** must be an int or hex (represented as a string).
    * **value** must be in a unicode range.
    * Returned value will be an ``int``.
    """
    # bool is a subclass of int, so it must be rejected explicitly
    if not isinstance(value, (int, basestring)) or isinstance(value, bool):
        raise TypeError("Glyph unicode must be a int or hex string, not %s."
                        % type(value).__name__)
    if isinstance(value, basestring):
        try:
            value = int(value, 16)
        except ValueError:
            raise ValueError("Glyph unicode hex must be a valid hex string.")
    # valid Unicode code points span U+0000 .. U+10FFFF (1114111)
    if not 0 <= value <= 1114111:
        raise ValueError("Glyph unicode must be in the Unicode range.")
    return value
def list_indexes(cls):
    """Return a dictionary keyed by es_index name whose values are the
    lists of rdfclasses bound to that index.

    args:
        None
    """
    mapped = cls.list_mapped_classes()
    indexes = {}
    for value in mapped.values():
        idx = value.es_defs.get('kds_esIndex')[0]
        # group classes sharing the same index
        indexes.setdefault(idx, []).append(value)
    return indexes
def replaceChild(self, new_child: 'WdomElement', old_child: 'WdomElement') -> Node:
    """Replace child nodes.

    :param new_child: node inserted in place of ``old_child``
    :param old_child: existing child node to be replaced
    :return: result of the local tree replacement
    """
    if self.connected:
        # when connected, also apply the replacement via the web-side helper
        self._replace_child_web(new_child, old_child)
    return self._replace_child(new_child, old_child)
def export_configuration_generator(self, sql, sql_args):
    """Build :class:`meteorpi_model.ExportConfiguration` instances from a query.

    :param sql:
        A SQL statement which must return rows describing export configurations
    :param sql_args:
        Any variables required to populate the query provided in 'sql'
    :return:
        A list of :class:`meteorpi_model.ExportConfiguration` instances, one
        per row.  (Despite the name, a fully built list is returned rather
        than a lazy generator.)
    """
    # map exportType to its search class; anything else falls back to
    # ObservatoryMetadataSearch, matching the original if/elif/else chain
    search_classes = {
        "observation": mp.ObservationSearch,
        "file": mp.FileRecordSearch,
    }
    self.con.execute(sql, sql_args)
    configurations = []
    for row in self.con.fetchall():
        search_cls = search_classes.get(row['exportType'], mp.ObservatoryMetadataSearch)
        search = search_cls.from_dict(json.loads(row['searchString']))
        configurations.append(mp.ExportConfiguration(
            target_url=row['targetURL'],
            user_id=row['targetUser'],
            password=row['targetPassword'],
            search=search,
            name=row['exportName'],
            description=row['description'],
            enabled=row['active'],
            config_id=row['exportConfigId']))
    return configurations
def json_loads ( cls , s , ** kwargs ) :
"""A rewrap of json . loads done for one reason - to inject a custom ` cls ` kwarg
: param s :
: param kwargs :
: return :
: rtype : dict""" | if 'cls' not in kwargs :
kwargs [ 'cls' ] = cls . json_decoder
return json . loads ( s , ** kwargs ) |
def send(self, msg):
    """send message to client.

    :param msg: text payload; must already be a ``str``
    """
    assert isinstance(msg, str), "String is required"
    if self._debug:
        # truncate the logged payload to 200 chars to keep log lines bounded
        log.info("outgoing message: %s, %s", self.id, str(msg)[:200])
    if self.state != STATE_OPEN:
        # silently drop messages once the session is no longer open
        return
    self._feed(FRAME_MESSAGE, msg)
def runGetRnaQuantificationSet(self, id_):
    """Runs a getRnaQuantificationSet request for the specified ID."""
    # the compound id carries the dataset id needed to locate the set
    compound_id = datamodel.RnaQuantificationSetCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compound_id.dataset_id)
    rna_quantification_set = dataset.getRnaQuantificationSet(id_)
    return self.runGetRequest(rna_quantification_set)
def field_elongation(ra, dec, date):
    """For a given field, calculate the solar elongation at the given date.

    :param ra: field's right ascension. unit="h" format="RAh:RAm:RAs"
    :param dec: field's declination. degrees
    :param date: date at which to calculate elongation
    :return: elongation from the Sun in degrees
    """
    sun = ephem.Sun()
    sun.compute(date)
    # angular separation from the Sun, converted to degrees
    separation_deg = math.degrees(ephem.separation((ra, dec), sun))
    return 180. - separation_deg
def num_examples(self):
    """The number of examples this subset spans."""
    if self.is_list:
        return len(self.list_or_slice)
    # slice case: resolve to concrete bounds, then measure the span
    start, stop, _ = self.slice_to_numerical_args(self.list_or_slice,
                                                  self.original_num_examples)
    return stop - start
def get_file_path_validator(default_file_param=None):
    """Creates a namespace validator that splits out 'path' into
    'directory_name' and 'file_name'.

    Allows another path-type parameter to be named which can supply a
    default filename.
    """
    def validator(namespace):
        if not hasattr(namespace, 'path'):
            return
        path = namespace.path
        if path:
            dir_name, file_name = os.path.split(path)
        else:
            dir_name, file_name = None, ''
        # a name without an extension is treated as a directory; borrow the
        # filename from the fallback parameter instead
        if default_file_param and '.' not in file_name:
            dir_name = path
            file_name = os.path.split(getattr(namespace, default_file_param))[1]
        namespace.directory_name = dir_name
        namespace.file_name = file_name
        del namespace.path
    return validator
def fast_sweep_time_evolution(Ep, epsilonp, gamma, omega_level, rm, xi, theta,
                              semi_analytic=True, file_name=None,
                              return_code=False):
    r"""Return a spectrum of time evolutions of the density matrix.

    We test a basic two-level system.

    >>> import numpy as np
    >>> from sympy import symbols
    >>> from scipy.constants import physical_constants
    >>> e_num = physical_constants["elementary charge"][0]
    >>> hbar_num = physical_constants["Planck constant over 2 pi"][0]
    >>> Ne = 2
    >>> Nl = 1
    >>> Ep = [-1.0]
    >>> epsilonp = [np.array([0, 0, 1.0])]
    >>> delta = symbols("delta")
    >>> detuning_knob = [delta]
    >>> gamma = np.array([[0.0, -1.0], [1.0, 0.0]])
    >>> omega_level = np.array([0.0, 100.0])
    >>> rm = [np.array([[0.0, 0.0], [1.0, 0.0]])*hbar_num/e_num
    ...       for p in range(3)]
    >>> xi = np.array([[[0, 1], [1, 0]]])
    >>> theta = phase_transformation(Ne, Nl, rm, xi)
    >>> sweep_time_evolution = fast_sweep_time_evolution(Ep, epsilonp, gamma,
    ...                                                  omega_level, rm, xi,
    ...                                                  theta)
    >>> t = np.linspace(0, 1e1, 11)
    >>> unfolding = Unfolding(Ne, True, True, True)
    >>> rho0 = np.array([[1, 0], [0, 0]])
    >>> rho0 = unfolding(rho0)
    >>> deltas, rho = sweep_time_evolution(t, rho0, [[-20, 20, 5]])
    >>> print(rho.shape)
    (5, 11, 3)
    >>> print(rho)
    [[[ 0.0000e+00  0.0000e+00  0.0000e+00]
      [ 5.6205e-04 -1.8774e-02 -1.4437e-02]
      [ 1.0302e-03 -3.1226e-02 -7.3031e-03]
      [ 9.1218e-04 -3.0149e-02  1.3325e-03]
      [ 6.3711e-04 -2.5073e-02  2.7437e-03]
      [ 5.3438e-04 -2.3100e-02  2.2977e-04]
      [ 5.8098e-04 -2.4044e-02 -1.4626e-03]
      [ 6.3808e-04 -2.5209e-02 -1.3291e-03]
      [ 6.4675e-04 -2.5407e-02 -6.4498e-04]
      [ 6.2948e-04 -2.5071e-02 -3.7457e-04]
      [ 6.1812e-04 -2.4841e-02 -4.9967e-04]]
    <BLANKLINE>
     [[ 0.0000e+00  0.0000e+00  0.0000e+00]
      [ 5.8142e-03 -7.4650e-02  1.3859e-02]
      [ 2.2458e-03 -4.3027e-02 -1.9436e-02]
      [ 2.2788e-03 -4.6867e-02  8.1709e-03]
      [ 3.0571e-03 -5.4724e-02 -6.7300e-03]
      [ 2.0980e-03 -4.5626e-02 -2.2121e-03]
      [ 2.6866e-03 -5.1685e-02 -1.1906e-03]
      [ 2.4351e-03 -4.9072e-02 -3.8467e-03]
      [ 2.4572e-03 -4.9419e-02 -1.6141e-03]
      [ 2.5241e-03 -5.0036e-02 -2.8327e-03]
      [ 2.4491e-03 -4.9304e-02 -2.4541e-03]]
    <BLANKLINE>
     [[ 0.0000e+00  0.0000e+00  0.0000e+00]
      [ 1.4361e-01  0.0000e+00 -3.4458e-01]
      [ 3.0613e-01  0.0000e+00 -4.1373e-01]
      [ 3.6110e-01  0.0000e+00 -3.7387e-01]
      [ 3.5427e-01  0.0000e+00 -3.3710e-01]
      [ 3.3835e-01  0.0000e+00 -3.2630e-01]
      [ 3.3135e-01  0.0000e+00 -3.2873e-01]
      [ 3.3115e-01  0.0000e+00 -3.3244e-01]
      [ 3.3261e-01  0.0000e+00 -3.3388e-01]
      [ 3.3343e-01  0.0000e+00 -3.3383e-01]
      [ 3.3355e-01  0.0000e+00 -3.3348e-01]]
    <BLANKLINE>
     [[ 0.0000e+00  0.0000e+00  0.0000e+00]
      [ 5.8142e-03  7.4650e-02  1.3859e-02]
      [ 2.2458e-03  4.3027e-02 -1.9436e-02]
      [ 2.2788e-03  4.6867e-02  8.1709e-03]
      [ 3.0571e-03  5.4724e-02 -6.7300e-03]
      [ 2.0980e-03  4.5626e-02 -2.2121e-03]
      [ 2.6866e-03  5.1685e-02 -1.1906e-03]
      [ 2.4351e-03  4.9072e-02 -3.8467e-03]
      [ 2.4572e-03  4.9419e-02 -1.6141e-03]
      [ 2.5241e-03  5.0036e-02 -2.8327e-03]
      [ 2.4491e-03  4.9304e-02 -2.4541e-03]]
    <BLANKLINE>
     [[ 0.0000e+00  0.0000e+00  0.0000e+00]
      [ 5.6205e-04  1.8774e-02 -1.4437e-02]
      [ 1.0302e-03  3.1226e-02 -7.3031e-03]
      [ 9.1218e-04  3.0149e-02  1.3325e-03]
      [ 6.3711e-04  2.5073e-02  2.7437e-03]
      [ 5.3438e-04  2.3100e-02  2.2977e-04]
      [ 5.8098e-04  2.4044e-02 -1.4626e-03]
      [ 6.3808e-04  2.5209e-02 -1.3291e-03]
      [ 6.4675e-04  2.5407e-02 -6.4498e-04]
      [ 6.2948e-04  2.5071e-02 -3.7457e-04]
      [ 6.1812e-04  2.4841e-02 -4.9967e-04]]]

    >>> deltas, rho = sweep_time_evolution(t, rho0, [[-20, 20, 11]],
    ...                                    average=True)
    >>> print(rho)
    [[ 0.0006 -0.024  -0.0021]
     [ 0.0011 -0.0308 -0.0007]
     [ 0.0016 -0.0375  0.0024]
     [ 0.0041 -0.0604 -0.0061]
     [ 0.016  -0.1175 -0.0118]
     [ 0.2999  0.     -0.3291]
     [ 0.016   0.1175 -0.0118]
     [ 0.0041  0.0604 -0.0061]
     [ 0.0016  0.0375  0.0024]
     [ 0.0011  0.0308 -0.0007]
     [ 0.0006  0.024  -0.0021]]

    """
    # The ``if True:`` blocks below are purely visual grouping markers
    # used throughout this code generator; they have no runtime effect.
    # We unpack variables.
    if True:
        Nl = xi.shape[0]
    # We determine which arguments are constants.
    if True:
        try:
            # if every field amplitude converts to a complex number it is a
            # compile-time constant; otherwise it stays a runtime argument
            Ep = np.array([complex(Ep[l]) for l in range(Nl)])
            variable_Ep = False
        except:
            variable_Ep = True
        try:
            # same constant-detection for the polarization vectors
            epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)])
                        for l in range(Nl)]
            variable_epsilonp = False
        except:
            variable_epsilonp = True
    # We obtain code for the time evolution.
    if True:
        detuning_knob = symbols("delta1:" + str(Nl))
        # NOTE(review): this first ``args`` tuple is dead code -- it is
        # immediately overwritten by the next assignment.  Confirm which
        # argument list fast_time_evolution() actually expects; note also
        # that the ``semi_analytic`` parameter appears unused (a literal
        # True is passed instead).
        args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi,
                theta, file_name, True)
        args = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi,
                theta, True, file_name, True)
        time_evolution = fast_time_evolution(*args)
        code = time_evolution + "\n\n"
    # We establish the arguments of the output function.
    if True:
        code += "def sweep_time_evolution(t, rho0, "
        # only non-constant quantities become parameters of the generated
        # function
        if variable_Ep:
            code += "Ep, "
        if variable_epsilonp:
            code += "epsilonp, "
        code += "detuning_knob, average=False, "
        code += "time_evolution=time_evolution):\n"
        code += '    r"""A fast frequency sweep of the steady state."""\n'
    # Code to determine the sweep range.
    if True:
        # locate the single detuning knob given as (start, stop, Nsteps)
        code += """    sweepN = -1\n"""
        code += """    for i, delta in enumerate(detuning_knob):\n"""
        code += """        if hasattr(delta, "__getitem__"):\n"""
        code += """            sweepN = i\n"""
        code += """            delta0 = delta[0]\n"""
        code += """            deltaf = delta[1]\n"""
        code += """            Ndelta = delta[2]\n"""
        code += """            break\n\n"""
        code += """    if sweepN == -1:\n"""
        code += """        s = 'One of the detuning knobs '\n"""
        code += """        s += 'must be of the form '\n"""
        code += """        s += '(start, stop, Nsteps)'\n"""
        code += """        raise ValueError(s)\n\n"""
        code += """    deltas = np.linspace(delta0, deltaf, Ndelta)\n\n"""
    # We call time_evolution.
    if True:
        # build one argument list per sweep point, substituting deltas[i]
        # for the swept knob
        code += "    args = [[t, rho0, "
        if variable_Ep:
            code += "Ep, "
        if variable_epsilonp:
            code += "epsilonp, "
        code += """list(detuning_knob[:sweepN]) +\n"""
        code += """             [deltas[i]] +\n"""
        code += """             list(detuning_knob[sweepN+1:]), average]\n"""
        code += """            for i in range(Ndelta)]\n\n"""
        code += "    rho = np.array([time_evolution(*argsi)\n"
        code += "                    for argsi in args])\n\n"
    # We finish the code.
    if True:
        code += "    return deltas, rho\n"
    # We write the code to file if provided, and execute it.
    if True:
        if file_name is not None:
            # NOTE(review): ``file`` is the Python 2 builtin; under Python 3
            # this raises NameError -- should be ``open`` (ideally in a
            # ``with`` block).
            f = file(file_name + ".py", "w")
            f.write(code)
            f.close()
        sweep_time_evolution = code
        # NOTE(review): the next three lines look like a mangled Python 2
        # ``exec sweep_time_evolution`` statement.  As parsed by Python 3
        # they are no-op expressions, so the function always returns the
        # generated *source string*; confirm and restore an actual
        # ``exec(...)`` if a callable is expected when return_code=False.
        if not return_code:
            exec
        sweep_time_evolution
        return sweep_time_evolution
def get_id_generator(self, name):
    """Creates cluster-wide :class:`~hazelcast.proxy.id_generator.IdGenerator`.

    :param name: (str), name of the IdGenerator proxy.
    :return: (:class:`~hazelcast.proxy.id_generator.IdGenerator`), IdGenerator proxy for the given name.
    """
    # the generator is backed by an atomic long under a prefixed name
    backing_long = self.get_atomic_long(ID_GENERATOR_ATOMIC_LONG_PREFIX + name)
    return self.proxy.get_or_create(ID_GENERATOR_SERVICE, name,
                                    atomic_long=backing_long)
def build_alignment(self, score, pieces):
    """Convert a score and a list of (start1, start2, length, pctId) pieces
    into an Alignment.

    Gap regions between consecutive pieces are filled with gap characters
    in one sequence and the actual sequence text in the other.

    :param score: alignment score stored on the resulting Alignment
    :param pieces: ordered list of (start1, start2, length, pctId) tuples
    :return: an Alignment with one component per sequence
    """
    # build text
    self.open_seqs()
    text1 = text2 = ""
    end1 = end2 = None
    for (start1, start2, length, pctId) in pieces:
        # idiom fix: use identity comparison with None (was ``end1 != None``)
        if end1 is not None:
            if start1 == end1:
                # insertion in sequence 2
                text1 += self.seq1_gap * (start2 - end2)
                text2 += self.seq2_file.get(end2, start2 - end2)
            else:
                # insertion in sequence 1
                text1 += self.seq1_file.get(end1, start1 - end1)
                text2 += self.seq2_gap * (start1 - end1)
        text1 += self.seq1_file.get(start1, length)
        text2 += self.seq2_file.get(start2, length)
        end1 = start1 + length
        end2 = start2 + length
    # create alignment
    start1 = pieces[0][0]
    start2 = pieces[0][1]
    end1 = pieces[-1][0] + pieces[-1][2]
    end2 = pieces[-1][1] + pieces[-1][2]
    size1 = end1 - start1
    size2 = end2 - start2
    a = Alignment(score=score, species_to_lengths=self.species_to_lengths)
    # if (self.seq1_strand == "-"): start1 = self.seq1_file.length - end1
    a.add_component(Component(self.seq1_src, start1, size1, self.seq1_strand, text=text1))
    # if (self.seq2_strand == "-"): start2 = self.seq2_file.length - end2
    a.add_component(Component(self.seq2_src, start2, size2, self.seq2_strand, text=text2))
    return a
def strip_exts(s, exts):
    """Given a string and an iterable of extensions, strip the extension off
    the string if the string ends with one of the extensions."""
    root, ext = os.path.splitext(s)
    return root if ext in exts else s
def mask_file(regionfile, infile, outfile, negate=False):
    """Create a masked version of a file, using a region.

    Parameters
    ----------
    regionfile : str
        A file which can be loaded as a :class:`AegeanTools.regions.Region`.
        The image will be masked according to this region.
    infile : str
        Input FITS image.
    outfile : str
        Output FITS image.
    negate : bool
        If True then pixels *outside* the region are masked.
        Default = False.

    See Also
    --------
    :func:`AegeanTools.MIMAS.mask_plane`
    """
    # Check that the input file is accessible and then open it
    if not os.path.exists(infile):
        raise AssertionError("Cannot locate fits file {0}".format(infile))
    im = pyfits.open(infile)
    if not os.path.exists(regionfile):
        raise AssertionError("Cannot locate region file {0}".format(regionfile))
    region = Region.load(regionfile)
    try:
        wcs = pywcs.WCS(im[0].header, naxis=2)
    except:  # TODO: figure out what error is being thrown
        # fall back to parsing the stringified header
        wcs = pywcs.WCS(str(im[0].header), naxis=2)
    if len(im[0].data.shape) > 2:
        # drop any length-1 axes so the data is at most a cube
        data = np.squeeze(im[0].data)
    else:
        data = im[0].data
    # NOTE(review): leftover debug print -- consider removing or demoting
    # to logging.debug
    print(data.shape)
    if len(data.shape) == 3:
        # mask each image plane of the cube separately
        for plane in range(data.shape[0]):
            mask_plane(data[plane], wcs, region, negate)
    else:
        mask_plane(data, wcs, region, negate)
    im[0].data = data
    im.writeto(outfile, overwrite=True)
    logging.info("Wrote {0}".format(outfile))
    return
def _get_field_method(self, tp):
    """Returns a reference to the form element's constructor method."""
    ctor = self.field_constructor.get(tp)
    # prefer an override with the same name defined on this instance/class
    if ctor and hasattr(self, ctor.__name__):
        return getattr(self, ctor.__name__)
    return ctor
def load_EROS_lc(filename='lm0010n22323.time'):
    """Read an EROS light curve and return its data.

    Parameters
    ----------
    filename : str, optional
        A light-curve filename.

    Returns
    -------
    dates : numpy.ndarray
        An array of dates.
    magnitudes : numpy.ndarray
        An array of magnitudes.
    errors : numpy.ndarray
        An array of magnitudes errors.
    """
    file_path = join(dirname(__file__), 'lightcurves', filename)
    columns = np.loadtxt(file_path)
    # column layout: date, magnitude, magnitude error
    return columns[:, 0], columns[:, 1], columns[:, 2]
def all_packages(self):
    """Return a dict of all packages; default packages override
    dev-packages on name collisions."""
    pipfile = self.parsed_pipfile
    combined = dict(pipfile.get("dev-packages", {}))
    combined.update(pipfile.get("packages", {}))
    return combined
def _add_rid_to_vrf_list(self, ri):
    """Add router ID to a VRF list.

    In order to properly manage VRFs in the ASR, their usage has to be
    tracked.  VRFs are provided with neutron router objects in the
    hosting_info fields of their gateway ports, so a VRF is only
    available when the router's gateway port is set.  VRFs can span
    routers (and OpenStack tenants), so sets of router ids belonging to
    the same VRF are kept in a dictionary keyed by VRF name.
    """
    # only routers with a gateway port carry VRF information
    if not (ri.ex_gw_port or ri.router.get('gw_port')):
        return
    driver = self.driver_manager.get_driver(ri.id)
    vrf_name = driver._get_vrf_name(ri)
    if not vrf_name:
        return
    if not self._router_ids_by_vrf.get(vrf_name):
        # first router seen for this VRF: create it on the device
        LOG.debug("++ CREATING VRF %s" % vrf_name)
        driver._do_create_vrf(vrf_name)
    self._router_ids_by_vrf.setdefault(vrf_name, set()).add(ri.router['id'])
def login(session, user, password, database=None, server=None):
    """Log into a MyGeotab server and store the returned credentials.

    Prompts interactively for any missing username/password, then
    authenticates and returns an API client.  Exits the process with a
    non-zero status on bad credentials.

    :param session: The current Session object.
    :param user: The username used for MyGeotab servers. Usually an email address.
    :param password: The password associated with the username. Optional if `session_id` is provided.
    :param database: The database or company name. Optional as this usually gets resolved upon authentication.
    :param server: The server ie. my23.geotab.com. Optional as this usually gets resolved upon authentication.
    """
    # Prompt for anything not supplied on the command line.
    if not user:
        user = click.prompt("Username", type=str)
    if not password:
        password = click.prompt("Password", hide_input=True, type=str)
    try:
        with click.progressbar(length=1, label="Logging in...") as progressbar:
            session.login(user, password, database, server)
            progressbar.update(1)
        if session.credentials:
            click.echo('Logged in as: %s' % session.credentials)
        session.load(database)
        return session.get_api()
    except mygeotab.AuthenticationException:
        click.echo('Incorrect credentials. Please try again.')
        # BUGFIX: exit with a failure status so shell scripts can detect
        # the failed login (previously exited 0, signalling success).
        sys.exit(1)
def stylemap(self, definitions=True, all=True, cache=False):
    """Return a dictionary of styles from the .docx word/styles.xml, keyed
    by style id (the id is what word/document.xml references rather than
    the style names; this mapping is used when outputting the document).

    If ``definitions`` == True (default), style definitions are included.
    If ``all`` == False, only styles that are actually used are included
    (slower).  If ``cache`` == True, a previously built map is reused and
    the new result is stored on the instance.
    """
    if cache == True and self._stylemap is not None:
        return self._stylemap
    # Expire any cached map before rebuilding.
    self._stylemap = None
    styles_xml = self.xml(src='word/styles.xml')
    document = self.xml(src='word/document.xml')
    footnotes = self.xml(src='word/footnotes.xml')   # None if no footnotes
    endnotes = self.xml(src='word/endnotes.xml')     # None if no endnotes
    styles_by_id = Dict()
    for node in styles_xml.root.xpath("w:style", namespaces=self.NS):
        style = Dict()
        style.id = node.get("{%(w)s}styleId" % self.NS)
        style.type = node.get("{%(w)s}type" % self.NS)
        style.xpath = "//w:rStyle[@w:val='%(id)s'] | //w:pStyle[@w:val='%(id)s']" % style
        if all == False:
            # Keep only styles referenced by the document body, falling
            # back to footnotes and then endnotes.
            uses = document.root.xpath(style.xpath, namespaces=DOCX.NS)
            if len(uses) == 0 and footnotes is not None:
                uses = footnotes.root.xpath(style.xpath, namespaces=DOCX.NS)
            if len(uses) == 0 and endnotes is not None:
                uses = endnotes.root.xpath(style.xpath, namespaces=DOCX.NS)
            if len(uses) == 0:
                continue
        LOG.debug("%s %r" % (node.tag, node.attrib))
        style.name = XML.find(node, "w:name/@w:val", namespaces=self.NS)
        if style.name is None:
            LOG.debug("style without name: %r" % style)
        styles_by_id[style.id] = style
        LOG.debug(style)
        if definitions is True:
            based_on = node.find("{%(w)s}basedOn" % DOCX.NS)
            if based_on is not None:
                style.basedOn = based_on.get("{%(w)s}val" % DOCX.NS)
            # Collect paragraph- and run-level property elements, with
            # namespace prefixes stripped from tags and attribute names.
            style.properties = Dict()
            for prop_el in node.xpath("w:pPr/* | w:rPr/*", namespaces=DOCX.NS):
                tag = re.sub(r"^\{[^}]*\}", "", prop_el.tag)
                props = Dict()
                for attr in prop_el.attrib.keys():
                    props[re.sub(r"^\{[^}]*\}", "", attr)] = prop_el.get(attr)
                style.properties[tag] = props
    if cache is True:
        self._stylemap = styles_by_id
    return styles_by_id
def _reset_state_mode(self, state, mode):
    """Reset the state mode to the given mode, and apply the custom state
    options specified with this analysis.

    :param state: The state to work with.
    :param str mode: The state mode.
    :return: None
    """
    state.set_mode(mode)
    # Layer the analysis-specific options on top of the mode's defaults:
    # add first (in-place union on the options set), then strip removals
    # (difference() rebinds to a new set, leaving the original untouched).
    state.options |= self._state_add_options
    state.options = state.options.difference(self._state_remove_options)
def is_unclaimed(work):
    """Return True if the work piece is available to be claimed.

    A piece is unclaimed when it is not completed and no worker holds a
    claim started within the last MAX_PROCESSING_TIME seconds (older
    claims are treated as expired).
    """
    if work['is_completed']:
        # Finished work can never be claimed again.
        return False
    freshness_cutoff = time.time() - MAX_PROCESSING_TIME
    started = work['claimed_worker_start_time']
    actively_claimed = bool(
        work['claimed_worker_id']
        and started is not None
        and started >= freshness_cutoff
    )
    return not actively_claimed
def make_name(self):
    """Autogenerate :attr:`name` from :attr:`title_for_name`.

    No-op when the instance has no title.
    """
    if not self.title:
        return
    # Delegate slug generation to the module-level make_name helper,
    # capped at this model's configured name length.
    self.name = six.text_type(
        make_name(self.title_for_name, maxlength=self.__name_length__))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.