signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _load_items_from_file(keychain, path):
    """Given a single file, loads all the trust objects from it into arrays and
    the keychain.

    Returns a tuple of lists: the first list is a list of identities, the
    second a list of certs.
    """
    certificates = []
    identities = []
    result_array = None
    # BUG FIX: ``filedata`` must be defined before the try block. Previously,
    # if CFDataCreate itself raised, the finally clause hit a NameError that
    # masked the original exception.
    filedata = None

    with open(path, 'rb') as f:
        raw_filedata = f.read()

    try:
        filedata = CoreFoundation.CFDataCreate(
            CoreFoundation.kCFAllocatorDefault,
            raw_filedata,
            len(raw_filedata)
        )
        result_array = CoreFoundation.CFArrayRef()
        result = Security.SecItemImport(
            filedata,                   # cert data
            None,                       # Filename, leaving it out for now
            None,                       # What the type of the file is, we don't care
            None,                       # what's in the file, we don't care
            0,                          # import flags
            None,                       # key params, can include passphrase in the future
            keychain,                   # The keychain to insert into
            ctypes.byref(result_array)  # Results
        )
        _assert_no_error(result)

        # A CFArray is not very useful to us as an intermediary
        # representation, so we are going to extract the objects we want
        # and then free the array. We don't need to keep hold of keys: the
        # keychain already has them!
        result_count = CoreFoundation.CFArrayGetCount(result_array)
        for index in range(result_count):
            item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
            item = ctypes.cast(item, CoreFoundation.CFTypeRef)

            # Retain before storing: the array owns its references and drops
            # them when released in the finally clause below.
            if _is_cert(item):
                CoreFoundation.CFRetain(item)
                certificates.append(item)
            elif _is_identity(item):
                CoreFoundation.CFRetain(item)
                identities.append(item)
    finally:
        if result_array:
            CoreFoundation.CFRelease(result_array)
        if filedata:
            CoreFoundation.CFRelease(filedata)

    return (identities, certificates)
def _proxy(self):
    """Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: SyncListItemContext for this SyncListItemInstance
    :rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemContext
    """
    # Lazily build the context once and cache it for subsequent calls.
    if self._context is None:
        solution = self._solution
        self._context = SyncListItemContext(
            self._version,
            service_sid=solution['service_sid'],
            list_sid=solution['list_sid'],
            index=solution['index'],
        )
    return self._context
def update(self, event_or_list):
    """Update the text and position of cursor according to the event passed."""
    event_or_list = super().update(event_or_list)
    for e in event_or_list:
        if e.type == KEYDOWN:
            if e.key == K_RIGHT:
                # BUG FIX: was ``e.mod * KMOD_CTRL`` — multiplication is
                # non-zero whenever ANY modifier is held (e.g. Shift), so
                # Shift+Right incorrectly jumped by words. Use bitwise AND,
                # matching the K_BACKSPACE / K_DELETE branches below.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.RIGHT)
                else:
                    self.move_cursor_one_letter(self.RIGHT)
            elif e.key == K_LEFT:
                # BUG FIX: same ``*`` -> ``&`` correction as for K_RIGHT.
                if e.mod & KMOD_CTRL:
                    self.move_cursor_one_word(self.LEFT)
                else:
                    self.move_cursor_one_letter(self.LEFT)
            elif e.key == K_BACKSPACE:
                # Nothing to delete when the cursor is at the very start.
                if self.cursor == 0:
                    continue
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.LEFT)
                else:
                    self.delete_one_letter(self.LEFT)
            elif e.key == K_DELETE:
                if e.mod & KMOD_CTRL:
                    self.delete_one_word(self.RIGHT)
                else:
                    self.delete_one_letter(self.RIGHT)
            elif e.unicode != '' and e.unicode.isprintable():
                # Any other printable key inserts its character.
                self.add_letter(e.unicode)
def penn_treebank_dataset(directory='data/penn-treebank',
                          train=False,
                          dev=False,
                          test=False,
                          train_filename='ptb.train.txt',
                          dev_filename='ptb.valid.txt',
                          test_filename='ptb.test.txt',
                          check_files=None,
                          urls=None,
                          unknown_token=DEFAULT_UNKNOWN_TOKEN,
                          eos_token=DEFAULT_EOS_TOKEN):
    """Load the Penn Treebank dataset.

    This is the Penn Treebank Project: Release 2 CDROM, featuring a million words
    of 1989 Wall Street Journal material.

    **Reference:** https://catalog.ldc.upenn.edu/ldc99t42

    **Citation:**
    Marcus, Mitchell P., Marcinkiewicz, Mary Ann & Santorini, Beatrice (1993).
    Building a Large Annotated Corpus of English: The Penn Treebank

    Args:
        directory (str, optional): Directory to cache the dataset.
        train (bool, optional): If to load the training split of the dataset.
        dev (bool, optional): If to load the development split of the dataset.
        test (bool, optional): If to load the test split of the dataset.
        train_filename (str, optional): The filename of the training split.
        dev_filename (str, optional): The filename of the development split.
        test_filename (str, optional): The filename of the test split.
        check_files (list of str, optional): Check if these files exist, then this
            download was successful. Defaults to ``['ptb.train.txt']``.
        urls (list of str, optional): URLs to download. Defaults to the official
            mirrors of the train/valid/test files.
        unknown_token (str, optional): Token to use for unknown words.
        eos_token (str, optional): Token to use at the end of sentences.

    Returns:
        :class:`tuple` of :class:`torchnlp.datasets.Dataset` or
        :class:`torchnlp.datasets.Dataset`: Returns between one and all dataset
        splits (train, dev and test) depending on if their respective boolean
        argument is ``True``.
    """
    # BUG FIX: ``check_files`` and ``urls`` previously used mutable list
    # defaults, which are shared across calls (a classic Python pitfall).
    # ``None`` sentinels preserve the same effective defaults safely.
    if check_files is None:
        check_files = ['ptb.train.txt']
    if urls is None:
        urls = [
            'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.train.txt',
            'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.valid.txt',
            'https://raw.githubusercontent.com/wojzaremba/lstm/master/data/ptb.test.txt',
        ]

    download_files_maybe_extract(urls=urls, directory=directory, check_files=check_files)

    ret = []
    splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
    requested_filenames = [f for (requested, f) in splits if requested]
    for filename in requested_filenames:
        full_path = os.path.join(directory, filename)
        text = []
        with io.open(full_path, encoding='utf-8') as f:
            for line in f:
                # Replace the PTB '<unk>' marker with the configured token and
                # terminate every line with the end-of-sentence token.
                text.extend(line.replace('<unk>', unknown_token).split())
                text.append(eos_token)
        ret.append(text)

    if len(ret) == 1:
        return ret[0]
    else:
        return tuple(ret)
def com_adobe_fonts_check_cff_call_depth(ttFont):
    """Is the CFF subr/gsubr call depth > 10?"""
    any_failures = False
    cff_table = ttFont['CFF '].cff

    for top_dict in cff_table.topDictIndex:
        if hasattr(top_dict, 'FDArray'):
            # CID-keyed font: check each font dict's private dict separately.
            for fd_index, font_dict in enumerate(top_dict.FDArray):
                private_dict = getattr(font_dict, 'Private', None)
                failed = yield from _check_call_depth(top_dict, private_dict, fd_index)
                any_failures = any_failures or failed
        else:
            # Plain font: a single (optional) private dict on the top dict.
            private_dict = getattr(top_dict, 'Private', None)
            failed = yield from _check_call_depth(top_dict, private_dict)
            any_failures = any_failures or failed

    if not any_failures:
        yield PASS, 'Maximum call depth not exceeded.'
def relieve_state_machines(self, model, prop_name, info):
    """The method relieves observed models before those get removed from the list
    of state_machines hold by observed StateMachineMangerModel. The method
    register as observer of observable StateMachineMangerModel.state_machines.
    """
    method = info['method_name']
    if method == '__setitem__':
        # Insertions need no un-registration.
        pass
    elif method == '__delitem__':
        removed_key = info['args'][0]
        self.relieve_model(self.state_machine_manager_model.state_machines[removed_key])
        self.logger.info(NotificationOverview(info))
    else:
        self.logger.warning(NotificationOverview(info))
def get_symmetry(cell, symprec=1e-5, angle_tolerance=-1.0):
    """This gives crystal symmetry operations from a crystal structure.

    Args:
        cell: Crystal structure given either in Atoms object or tuple.
            In the case given by a tuple, it has to follow the form below,
            (Lattice parameters in a 3x3 array (see the detail below),
             Fractional atomic positions in an Nx3 array,
             Integer numbers to distinguish species in a length N array,
             (optional) Collinear magnetic moments in a length N array),
            where N is the number of atoms.
            Lattice parameters are given in the form:
                [[a_x, a_y, a_z],
                 [b_x, b_y, b_z],
                 [c_x, c_y, c_z]]
        symprec:
            float: Symmetry search tolerance in the unit of length.
        angle_tolerance:
            float: Symmetry search tolerance in the unit of angle deg.
                If the value is negative, an internally optimized routine
                is used to judge symmetry.

    Return:
        A dictionary: Rotation parts and translation parts. Dictionary keys:
            'rotations': Gives the numpy 'intc' array of the rotation matrices.
            'translations': Gives the numpy 'double' array of fractional
                translations with respect to a, b, c axes.
    """
    _set_no_error()

    lattice, positions, numbers, magmoms = _expand_cell(cell)
    if lattice is None:
        return None

    # Pre-allocate output buffers for the C routine (48 ops per atom max).
    max_ops = 48 * len(positions)
    rotation = np.zeros((max_ops, 3, 3), dtype='intc')
    translation = np.zeros((max_ops, 3), dtype='double')

    # Get symmetry operations
    if magmoms is None:
        # Non-magnetic case: delegate to the full dataset routine.
        dataset = get_symmetry_dataset(cell, symprec=symprec,
                                       angle_tolerance=angle_tolerance)
        if dataset is None:
            return None
        return {'rotations': dataset['rotations'],
                'translations': dataset['translations'],
                'equivalent_atoms': dataset['equivalent_atoms']}

    # Collinear-spin case: call the dedicated C routine directly.
    equivalent_atoms = np.zeros(len(magmoms), dtype='intc')
    num_sym = spg.symmetry_with_collinear_spin(rotation, translation,
                                               equivalent_atoms, lattice,
                                               positions, numbers, magmoms,
                                               symprec, angle_tolerance)
    _set_error_message()
    if num_sym == 0:
        return None
    return {'rotations': np.array(rotation[:num_sym], dtype='intc', order='C'),
            'translations': np.array(translation[:num_sym], dtype='double', order='C'),
            'equivalent_atoms': equivalent_atoms}
def get_next(self):
    """Returns the next :obj:`Gtk.TreeModelRow` or None"""
    successor = self.model.iter_next(self.iter)
    if not successor:
        return None
    return TreeModelRow(self.model, successor)
def id(opts):
    '''Return a unique ID for this proxy minion. This ID MUST NOT CHANGE.
    If it changes while the proxy is running the salt-master will get
    really confused and may stop talking to this minion'''
    # Ask the REST endpoint for the minion id and return it as ASCII bytes.
    endpoint = opts['proxy']['url'] + 'id'
    response = salt.utils.http.query(endpoint, decode_type='json', decode=True)
    return response['dict']['id'].encode('ascii', 'ignore')
def on_palette_name_changed(self, combo):
    """Changes the value of palette in dconf"""
    name = combo.get_active_text()
    # Ignore selections that do not map to a known palette.
    if name not in PALETTES:
        return
    colors = PALETTES[name]
    self.settings.styleFont.set_string('palette', colors)
    self.settings.styleFont.set_string('palette-name', name)
    self.set_palette_colors(colors)
    self.update_demo_palette(colors)
def day_interval(year, month, day, milliseconds=False, return_string=False):
    """Return a start datetime and end datetime of a day.

    :param milliseconds: Minimum time resolution.
    :param return_string: If you want string instead of datetime, set True

    Usage Example::

        >>> start, end = rolex.day_interval(2014, 6, 17)
        >>> start
        datetime(2014, 6, 17, 0, 0, 0)
        >>> end
        datetime(2014, 6, 17, 23, 59, 59)
    """
    # The end of the day is one resolution step before the next midnight.
    resolution = timedelta(milliseconds=1) if milliseconds else timedelta(seconds=1)
    start = datetime(year, month, day)
    end = datetime(year, month, day) + timedelta(days=1) - resolution
    if return_string:
        return str(start), str(end)
    return start, end
def gen_challenge(self, state):
    """This function generates a challenge for given state. It selects a
    random number and sets that as the challenge key. By default, v_max
    is set to the prime, and the number of chunks to challenge is the
    number of chunks in the file. (this doesn't guarantee that the whole
    file will be checked since some chunks could be selected twice and
    some selected none.

    :param state: the state to use. it can be encrypted, as it will
        have just been received from the server
    """
    state.decrypt(self.key)
    # 32 bytes of fresh randomness serve as the challenge key.
    random_key = Random.new().read(32)
    return Challenge(state.chunks, self.prime, random_key)
def activate_pipeline(self):
    """Activates a deployed pipeline, useful for OnDemand pipelines"""
    pipeline_id = self.pipeline_id
    self.client.activate_pipeline(pipelineId=pipeline_id)
    LOG.info("Activated Pipeline %s", pipeline_id)
def make_url(self, path, api_root=u'/v2/'):
    """Gets a full URL from just path."""
    # Resolve the API root against the base URL first, then the path.
    root_url = urljoin(self.url, api_root)
    return urljoin(root_url, path)
def update_resource_properties(r, orig_columns={}, force=False):
    """Get descriptions and other properties from this, or upstream, packages, and add them to the schema."""
    # NOTE(review): ``orig_columns`` uses a mutable default; it is only read
    # (never mutated) below, so this is safe, but a None sentinel would be
    # more conventional. ``force`` appears unused in this block — confirm
    # whether callers rely on it.
    added = []
    schema_term = r.schema_term
    if not schema_term:
        warn("No schema term for ", r.name)
        return
    rg = r.raw_row_generator
    # Get columns information from the schema, or, if it is a package reference,
    # from the upstream schema
    upstream_columns = {e['name'].lower() if e['name'] else '': e for e in r.columns() or {}}
    # Just from the local schema
    schema_columns = {e['name'].lower() if e['name'] else '': e for e in r.schema_columns or {}}
    # Ask the generator if it can provide column descriptions and types
    generator_columns = {e['name'].lower() if e['name'] else '': e for e in rg.columns or {}}

    def get_col_value(col_name, value_name):
        # Resolve ``value_name`` for a column, letting later sources in the
        # list below override earlier ones (schema_columns wins last).
        v = None
        if not col_name:
            return None
        for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
            v_ = d.get(col_name.lower(), {}).get(value_name)
            if v_:
                v = v_
        return v

    # Look for new properties
    extra_properties = set()
    for d in [generator_columns, upstream_columns, orig_columns, schema_columns]:
        for k, v in d.items():
            for kk, vv in v.items():
                extra_properties.add(kk)

    # Remove the properties that are already accounted for
    extra_properties = extra_properties - {'pos', 'header', 'name', ''}

    # Add any extra properties, such as from upstream packages, to the schema.
    for ep in extra_properties:
        r.doc['Schema'].add_arg(ep)

    for c in schema_term.find('Table.Column'):
        for ep in extra_properties:
            # Create (or fetch) the child term, then fill it in only when a
            # value is actually available from one of the sources.
            t = c.get_or_new_child(ep)
            v = get_col_value(c.name, ep)
            if v:
                t.value = v
                added.append((c.name, ep, v))

    prt('Updated schema for {}. Set {} properties'.format(r.name, len(added)))
def build_url_field(self, field_name, model_class):
    """Create a field representing the object's own URL."""
    field_class = self.serializer_url_field
    kwargs = rest_framework.serializers.get_url_kwargs(model_class)
    # Augment the standard URL kwargs with the nested-router lookup field.
    kwargs.update({"parent_lookup_field": self.get_parent_lookup_field()})
    return field_class, kwargs
def publish_topology_description_changed(self, previous_description,
                                         new_description, topology_id):
    """Publish a TopologyDescriptionChangedEvent to all topology listeners.

    :Parameters:
      - `previous_description`: The previous topology description.
      - `new_description`: The new topology description.
      - `topology_id`: A unique identifier for the topology this server
        is a part of.
    """
    event = TopologyDescriptionChangedEvent(
        previous_description, new_description, topology_id)
    for listener in self.__topology_listeners:
        # One misbehaving listener must not prevent the others from being
        # notified, so swallow (but report) its exception.
        try:
            listener.description_changed(event)
        except Exception:
            _handle_exception()
def has_chess960_castling_rights(self) -> bool:
    """Checks if there are castling rights that are only possible in Chess960."""
    # Temporarily enable Chess960 mode so clean_castling_rights() does not
    # discard rights on non-standard rook squares, then restore the flag.
    saved_mode = self.chess960
    self.chess960 = True
    rights = self.clean_castling_rights()
    self.chess960 = saved_mode

    # Any right outside the four corner squares is Chess960-only.
    if rights & ~BB_CORNERS:
        return True

    # Corner rights are only standard if the matching king sits on e1/e8.
    if rights & BB_RANK_1 and not self.occupied_co[WHITE] & self.kings & BB_E1:
        return True
    if rights & BB_RANK_8 and not self.occupied_co[BLACK] & self.kings & BB_E8:
        return True

    return False
def filter_publication(publication, cache=_CACHE):
    """Deduplication function, which compares `publication` with samples stored in
    `cache`. If the match NOT is found, `publication` is returned, else None.

    Args:
        publication (obj): :class:`.Publication` instance.
        cache (obj): Cache which is used for lookups.

    Returns:
        obj/None: Depends whether the object is found in cache or not.
    """
    if cache is None:
        cache = load_cache()

    # Already seen -> filter it out.
    if publication._get_hash() in cache:
        return None

    # New publication: remember it and persist the cache.
    cache.update([publication._get_hash()])
    save_cache(cache)
    return publication
def inherit_doc(cls):
    """A decorator that makes a class inherit documentation from its parents."""
    for name, member in vars(cls).items():
        # Only inherit docstrings for public members.
        if name.startswith("_"):
            continue
        if member.__doc__:
            continue
        # Take the first documented implementation found among the bases.
        for base in cls.__bases__:
            inherited = getattr(base, name, None)
            if inherited and getattr(inherited, "__doc__", None):
                member.__doc__ = inherited.__doc__
                break
    return cls
def report_sections(self):
    """Add results from Qualimap BamQC parsing to the report"""
    # Append to self.sections list
    if len(self.qualimap_bamqc_coverage_hist) > 0:
        # Chew back on histogram to prevent long flat tail
        # (find a sensible max x - lose 1% of longest tail)
        max_x = 0
        total_bases_by_sample = dict()
        for s_name, d in self.qualimap_bamqc_coverage_hist.items():
            total_bases_by_sample[s_name] = sum(d.values())
            cumulative = 0
            # Walk depths from highest downwards; once the top 1% of bases is
            # passed, that depth becomes a candidate x-axis maximum.
            for count in sorted(d.keys(), reverse=True):
                cumulative += d[count]
                if cumulative / total_bases_by_sample[s_name] > 0.01:
                    max_x = max(max_x, count)
                    break

        rates_within_threshs = dict()
        for s_name, hist in self.qualimap_bamqc_coverage_hist.items():
            total = total_bases_by_sample[s_name]
            # Make a range of depths that isn't stupidly huge for high coverage expts
            depth_range = list(range(0, max_x + 1, math.ceil(float(max_x) / 400.0) if max_x > 0 else 1))
            # Check that we have our specified coverages in the list
            for c in self.covs:
                if int(c) not in depth_range:
                    depth_range.append(int(c))
            # Calculate the coverage rates for this range of coverages
            rates_within_threshs[s_name] = _calculate_bases_within_thresholds(hist, total, depth_range)
            # Add requested coverage levels to the General Statistics table
            for c in self.covs:
                if int(c) in rates_within_threshs[s_name]:
                    self.general_stats_data[s_name]['{}_x_pc'.format(c)] = rates_within_threshs[s_name][int(c)]
                else:
                    self.general_stats_data[s_name]['{}_x_pc'.format(c)] = 0

        # Section 1 - BamQC Coverage Histogram
        coverage_histogram_helptext = '''
For a set of DNA or RNA reads mapped to a reference sequence, such as a genome
or transcriptome, the depth of coverage at a given base position is the number
of high-quality reads that map to the reference at that position
(<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>).
QualiMap groups the bases of a reference sequence by their depth of coverage
(*0&#215;, 1&#215;, &#8230;, N&#215;*), then plots the number of bases of the
reference (y-axis) at each level of coverage depth (x-axis). This plot shows
the frequency of coverage depths relative to the reference sequence for each
read dataset, which provides an indirect measure of the level and variation of
coverage depth in the corresponding sequenced sample.
If reads are randomly distributed across the reference sequence, this plot
should resemble a Poisson distribution (<a href="https://doi.org/10.1016/0888-7543(88)90007-9"
target="_blank">Lander &amp; Waterman 1988</a>), with a peak indicating approximate
depth of coverage, and more uniform coverage depth being reflected in a narrower
spread. The optimal level of coverage depth depends on the aims of the
experiment, though it should at minimum be sufficiently high to adequately
address the biological question; greater uniformity of coverage is generally
desirable, because it increases breadth of coverage for a given depth of
coverage, allowing equivalent results to be achieved at a lower sequencing depth
(<a href="https://doi.org/10.1002/gepi.20575" target="_blank">Sampson
et al. 2011</a>; <a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims
et al. 2014</a>). However, it is difficult to achieve uniform coverage
depth in practice, due to biases introduced during sample preparation
(<a href="https://doi.org/10.1016/j.yexcr.2014.01.008" target="_blank">van
Dijk et al. 2014</a>), sequencing (<a href="https://doi.org/10.1186/gb-2013-14-5-r51"
target="_blank">Ross et al. 2013</a>) and read mapping
(<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>).
This plot may include a small peak for regions of the reference sequence with
zero depth of coverage. Such regions may be absent from the given sample (due
to a deletion or structural rearrangement), present in the sample but not
successfully sequenced (due to bias in sequencing or preparation), or sequenced
but not successfully mapped to the reference (due to the choice of mapping
algorithm, the presence of repeat sequences, or mismatches caused by variants
or sequencing errors). Related factors cause most datasets to contain some
unmapped reads (<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims
et al. 2014</a>).'''
        self.add_section(
            name='Coverage histogram',
            anchor='qualimap-coverage-histogram',
            description='Distribution of the number of locations in the reference genome with a given depth of coverage.',
            helptext=coverage_histogram_helptext,
            plot=linegraph.plot(self.qualimap_bamqc_coverage_hist, {
                'id': 'qualimap_coverage_histogram',
                'title': 'Qualimap BamQC: Coverage histogram',
                'ylab': 'Genome bin counts',
                'xlab': 'Coverage (X)',
                'ymin': 0,
                'xmin': 0,
                'xmax': max_x,
                'xDecimals': False,
                'tt_label': '<b>{point.x}X</b>: {point.y}',
            })
        )

        # Section 2 - BamQC cumulative coverage genome fraction
        genome_fraction_helptext = '''
For a set of DNA or RNA reads mapped to a reference sequence, such as a genome
or transcriptome, the depth of coverage at a given base position is the number
of high-quality reads that map to the reference at that position, while the
breadth of coverage is the fraction of the reference sequence to which reads
have been mapped with at least a given depth of coverage
(<a href="https://doi.org/10.1038/nrg3642" target="_blank">Sims et al. 2014</a>).
Defining coverage breadth in terms of coverage depth is useful, because
sequencing experiments typically require a specific minimum depth of coverage
over the region of interest (<a href="https://doi.org/10.1038/nrg3642"
target="_blank">Sims et al. 2014</a>), so the extent of the reference sequence
that is amenable to analysis is constrained to lie within regions that have
sufficient depth. With inadequate sequencing breadth, it can be difficult to
distinguish the absence of a biological feature (such as a gene) from a lack
of data (<a href="https://doi.org/10.1101/gr.7050807" target="_blank">Green 2007</a>).
For increasing coverage depths (*1&#215;, 2&#215;, &#8230;, N&#215;*),
QualiMap calculates coverage breadth as the percentage of the reference
sequence that is covered by at least that number of reads, then plots
coverage breadth (y-axis) against coverage depth (x-axis). This plot
shows the relationship between sequencing depth and breadth for each read
dataset, which can be used to gauge, for example, the likely effect of a
minimum depth filter on the fraction of a genome available for analysis.'''
        self.add_section(
            name='Cumulative genome coverage',
            anchor='qualimap-cumulative-genome-fraction-coverage',
            description='Percentage of the reference genome with at least the given depth of coverage.',
            helptext=genome_fraction_helptext,
            plot=linegraph.plot(rates_within_threshs, {
                'id': 'qualimap_genome_fraction',
                'title': 'Qualimap BamQC: Genome fraction covered by at least X reads',
                'ylab': 'Fraction of reference (%)',
                'xlab': 'Coverage (X)',
                'ymax': 100,
                'ymin': 0,
                'xmin': 0,
                'xmax': max_x,
                'xDecimals': False,
                'tt_label': '<b>{point.x}X</b>: {point.y:.2f}%',
            })
        )

    # Section 3 - Insert size histogram
    if len(self.qualimap_bamqc_insert_size_hist) > 0:
        insert_size_helptext = '''
To overcome limitations in the length of DNA or RNA sequencing reads,
many sequencing instruments can produce two or more shorter reads from
one longer fragment in which the relative position of reads is
approximately known, such as paired-end or mate-pair reads
(<a href="https://doi.org/10.1146/annurev-anchem-062012-092628"
target="_blank">Mardis 2013</a>). Such techniques can extend the reach
of sequencing technology, allowing for more accurate placement of reads
(<a href="https://doi.org/10.1146/annurev-genom-090413-025358"
target="_blank">Reinert et al. 2015</a>) and better resolution of repeat
regions (<a href="https://doi.org/10.1146/annurev-genom-090413-025358"
target="_blank">Reinert et al. 2015</a>), as well as detection of
structural variation (<a href="https://doi.org/10.1038/nrg2958"
target="_blank">Alkan et al. 2011</a>) and chimeric transcripts
(<a href="https://doi.org/10.1073/pnas.0904720106"
target="_blank">Maher et al. 2009</a>).
All these methods assume that the approximate size of an insert is known.
(Insert size can be defined as the length in bases of a sequenced DNA or
RNA fragment, excluding technical sequences such as adapters, which are
typically removed before alignment.) This plot allows for that assumption
to be assessed. With the set of mapped fragments for a given sample, QualiMap
groups the fragments by insert size, then plots the frequency of mapped
fragments (y-axis) over a range of insert sizes (x-axis). In an ideal case,
the distribution of fragment sizes for a sequencing library would culminate
in a single peak indicating average insert size, with a narrow spread
indicating highly consistent fragment lengths.
QualiMap calculates insert sizes as follows: for each fragment in which
every read mapped successfully to the same reference sequence, it
extracts the insert size from the `TLEN` field of the leftmost read
(see the <a href="http://qualimap.bioinfo.cipf.es/doc_html/index.html"
target="_blank">Qualimap 2 documentation</a>), where the `TLEN` (or
'observed Template LENgth') field contains 'the number of bases from the
leftmost mapped base to the rightmost mapped base'
(<a href="https://samtools.github.io/hts-specs/" target="_blank">SAM
format specification</a>). Note that because it is defined in terms of
alignment to a reference sequence, the value of the `TLEN` field may
differ from the insert size due to factors such as alignment clipping,
alignment errors, or structural variation or splicing in a gap between
reads from the same fragment.'''
        self.add_section(
            name='Insert size histogram',
            anchor='qualimap-insert-size-histogram',
            description='Distribution of estimated insert sizes of mapped reads.',
            helptext=insert_size_helptext,
            plot=linegraph.plot(self.qualimap_bamqc_insert_size_hist, {
                'id': 'qualimap_insert_size',
                'title': 'Qualimap BamQC: Insert size histogram',
                'ylab': 'Fraction of reads',
                'xlab': 'Insert Size (bp)',
                'ymin': 0,
                'xmin': 0,
                'tt_label': '<b>{point.x} bp</b>: {point.y}',
            })
        )

    # Section 4 - GC-content distribution
    if len(self.qualimap_bamqc_gc_content_dist) > 0:
        gc_content_helptext = '''
GC bias is the difference between the guanine-cytosine content
(GC-content) of a set of sequencing reads and the GC-content of the DNA
or RNA in the original sample. It is a well-known issue with sequencing
systems, and may be introduced by PCR amplification, among other factors
(<a href="https://doi.org/10.1093/nar/gks001" target="_blank">Benjamini
&amp; Speed 2012</a>; <a href="https://doi.org/10.1186/gb-2013-14-5-r51"
target="_blank">Ross et al. 2013</a>).
QualiMap calculates the GC-content of individual mapped reads, then
groups those reads by their GC-content (*1%, 2%, &#8230;, 100%*), and
plots the frequency of mapped reads (y-axis) at each level of GC-content
(x-axis). This plot shows the GC-content distribution of mapped reads
for each read dataset, which should ideally resemble that of the
original sample. It can be useful to display the GC-content distribution
of an appropriate reference sequence for comparison, and QualiMap has an
option to do this (see the <a href="http://qualimap.bioinfo.cipf.es/doc_html/index.html"
target="_blank">Qualimap 2 documentation</a>).'''
        # Optional reference-genome GC curves, drawn as dashed overlays that
        # alternate between two colours per species.
        extra_series = []
        for i, (species_name, species_data) in enumerate(sorted(self.qualimap_bamqc_gc_by_species.items())):
            extra_series.append({
                'name': species_name,
                'data': list(species_data.items()),
                'dashStyle': 'Dash',
                'lineWidth': 1,
                'color': ['#000000', '#E89191'][i % 2],
            })
        if len(self.qualimap_bamqc_gc_content_dist) == 1:
            desc = 'The solid line represents the distribution of GC content of mapped reads for the sample.'
        else:
            desc = 'Each solid line represents the distribution of GC content of mapped reads for a given sample.'
        lg_config = {
            'id': 'qualimap_gc_content',
            'title': 'Qualimap BamQC: GC content distribution',
            'ylab': 'Fraction of reads',
            'xlab': 'GC content (%)',
            'ymin': 0,
            'xmin': 0,
            'xmax': 100,
            'tt_label': '<b>{point.x}%</b>: {point.y:.3f}'
        }
        if len(extra_series) == 1:
            desc += ' The dotted line represents a pre-calculated GC distribution for the reference genome.'
            lg_config['extra_series'] = extra_series
        elif len(extra_series) > 1:
            desc += ' Each dotted line represents a pre-calculated GC distribution for a specific reference genome.'
            lg_config['extra_series'] = extra_series
        self.add_section(
            name='GC content distribution',
            anchor='qualimap-gc-distribution',
            description=desc,
            helptext=gc_content_helptext,
            plot=linegraph.plot(self.qualimap_bamqc_gc_content_dist, lg_config)
        )
def calculate_fitness(self):
    """Calculate your fitness."""
    if self.fitness is not None:
        raise Exception(
            "You are calculating the fitness of agent {}, ".format(self.id)
            + "but they already have a fitness"
        )

    said_blue = self.infos(type=Meme)[0].contents == "blue"
    environment = self.network.nodes(type=RogersEnvironment)[0]
    latest_info = max(environment.infos(), key=attrgetter("id"))
    proportion = float(latest_info.contents)
    self.proportion = proportion
    is_blue = proportion > 0.5

    # Score 1 for a correct colour judgement, 0 otherwise.
    self.score = 1 if said_blue is is_blue else 0

    is_asocial = self.infos(type=LearningGene)[0].contents == "asocial"
    exponent = 2
    benefit = 1
    cost = 0.3 * benefit
    baseline = cost + 0.0001
    self.fitness = (baseline + self.score * benefit - is_asocial * cost) ** exponent
def RegisterTextKey(cls, key, frameid):
    """Register a text key.

    If the key you need to register is a simple one-to-one mapping
    of ID3 frame name to EasyID3 key, then you can use this
    function::

        EasyID3.RegisterTextKey("title", "TIT2")
    """
    def read_frame(id3, key):
        return list(id3[frameid])

    def write_frame(id3, key, value):
        # Update the existing frame in place, or create a fresh UTF-8 one.
        try:
            frame = id3[frameid]
        except KeyError:
            id3.add(mutagen.id3.Frames[frameid](encoding=3, text=value))
        else:
            frame.encoding = 3
            frame.text = value

    def remove_frame(id3, key):
        del id3[frameid]

    cls.RegisterKey(key, read_frame, write_frame, remove_frame)
def date_range(cls, start_time, end_time, freq):
    '''Return a new SArray that represents a fixed-frequency datetime index.

    Parameters
    ----------
    start_time : datetime.datetime
        Left bound for generating dates.
    end_time : datetime.datetime
        Right bound for generating dates.
    freq : datetime.timedelta
        Fixed frequency between two consecutive data points.

    Returns
    -------
    out : SArray

    Examples
    --------
    >>> import datetime as dt
    >>> start = dt.datetime(2013, 5, 7, 10, 4, 10)
    >>> end = dt.datetime(2013, 5, 10, 10, 4, 10)
    >>> sa = tc.SArray.date_range(start, end, dt.timedelta(1))
    '''
    # Validate arguments in declaration order so the first bad one wins.
    checks = (
        (start_time, datetime.datetime,
         "The ``start_time`` argument must be from type datetime.datetime."),
        (end_time, datetime.datetime,
         "The ``end_time`` argument must be from type datetime.datetime."),
        (freq, datetime.timedelta,
         "The ``freq`` argument must be from type datetime.timedelta."),
    )
    for value, required_type, message in checks:
        if not isinstance(value, required_type):
            raise TypeError(message)
    from .. import extensions
    return extensions.date_range(start_time, end_time, freq.total_seconds())
def match_or(self, tokens, item):
    """Match an "or" pattern: each alternative is tried on a duplicate
    matcher, then the first alternative is matched on this matcher only."""
    first, alternatives = tokens[0], tokens[1:]
    for alternative in alternatives:
        self.duplicate().match(alternative, item)
    with self.only_self():
        self.match(first, item)
def pack(self, grads):
    """Flatten and concatenate gradients, then split them into packs.

    Args:
        grads (list): list of gradient tensors; shapes must match
            ``self._shapes`` position by position.

    Returns:
        packed list of gradient tensors to be aggregated.
    """
    for idx, grad in enumerate(grads):
        assert grad.shape == self._shapes[idx]
    with cached_name_scope("GradientPacker", top_level=False):
        # Flatten every gradient to 1-D and fuse them into one tensor.
        flattened = [tf.reshape(grad, [-1]) for grad in grads]
        concat_grads = tf.concat(flattened, 0, name='concatenated_grads')
        # Cut the fused tensor back into the precomputed pack sizes.
        return tf.split(concat_grads, self._split_sizes)
def encode_properties(parameters):
    """Encode a properties mapping into an Artifactory URL-parameter string.

    It does not escape backslash because it is not needed.
    See: http://www.jfrog.com/confluence/display/RTF/Artifactory+REST+API#ArtifactoryRESTAPI-SetItemProperties

    :param parameters: mapping of property name -> value (or list/tuple of
        values); every value is passed through ``escape_chars``
    :return: ``|``-separated ``key=value`` string, keys in sorted order
    """
    result = []
    # sorted() already returns an iterable; the old iter() wrapper was redundant.
    for param in sorted(parameters):
        value = parameters[param]
        if isinstance(value, (list, tuple)):
            value = ','.join(escape_chars(x) for x in value)
        else:
            value = escape_chars(value)
        result.append("%s=%s" % (param, value))
    return '|'.join(result)
def flush(bank, key=None):
    '''Remove the key from the cache bank with all the key content.

    When ``key`` is None the whole bank is removed recursively.
    '''
    c_key = bank if key is None else '{0}/{1}'.format(bank, key)
    try:
        # recurse only when flushing the entire bank
        return api.kv.delete(c_key, recurse=key is None)
    except Exception as exc:
        raise SaltCacheError(
            'There was an error removing the key, {0}: {1}'.format(c_key, exc))
def u_get(self, quant):
    """Return an integer built from the next ``quant`` unsigned bits.

    Returns None when ``quant`` is falsy (0/None). Bits are consumed
    MSB-first from a one-byte buffer that is refilled from ``self.src``.
    """
    if not quant:
        return
    pieces = []
    remaining = quant
    while remaining:
        if self._count == 0:
            # Refill the bit buffer from the next source byte.
            raw = self.src.read(1)
            value = struct.unpack("<B", raw)[0]
            self._bits = bin(value)[2:].zfill(8)
            self._count = 8
        take = min(remaining, self._count)
        self._count -= take
        remaining -= take
        pieces.append(self._bits[:take])
        self._bits = self._bits[take:]
    return int("".join(pieces), 2)
def get_backend_mod(name=None):
    """Return the imported module for the given backend name.

    Parameters
    ----------
    name : `str`, optional
        the name of the backend, defaults to the current backend.

    Returns
    -------
    backend_mod : `module`
        the module as returned by :func:`importlib.import_module`

    Examples
    --------
    >>> from gwpy.plot.plot import get_backend_mod
    >>> print(get_backend_mod('agg'))
    <module 'matplotlib.backends.backend_agg' from ...>
    """
    if name is None:
        name = get_backend()
    prefix = "module://"
    if name.startswith(prefix):
        # Explicit module path given by the user.
        module_path = name[len(prefix):]
    else:
        module_path = "matplotlib.backends.backend_{}".format(name.lower())
    return importlib.import_module(module_path)
def classify_identifier(did):
    """Return a text fragment classifying the ``did``.

    Return <UNKNOWN> if the DID could not be classified. This should not
    normally happen and may indicate that the DID was orphaned in the
    database.
    """
    # Ordered (predicate, description) pairs; first hit wins.
    classifiers = (
        (_is_unused_did, 'unused on this Member Node'),
        (is_sid, 'a Series ID (SID) of a revision chain'),
        (is_local_replica, 'a Persistent ID (PID) of a local replica'),
        (is_unprocessed_local_replica,
         'a Persistent ID (PID) of an accepted but not yet processed local replica'),
        (is_archived,
         'a Persistent ID (PID) of a previously archived local object'),
        (is_obsoleted,
         'a Persistent ID (PID) of a previously updated (obsoleted) local object'),
        (is_resource_map_db, 'a Persistent ID (PID) of a local resource map'),
        (is_existing_object, 'a Persistent ID (PID) of an existing local object'),
        (is_revision_chain_placeholder,
         'a Persistent ID (PID) of a remote or non-existing revision of a local '
         'replica'),
        (is_resource_map_member,
         'a Persistent ID (PID) of a remote or non-existing object aggregated in '
         'a local Resource Map'),
    )
    for predicate, description in classifiers:
        if predicate(did):
            return description
    logger.warning('Unable to classify known identifier. did="{}"'.format(did))
    return '<UNKNOWN>'
def raw_sensor_strings(self):
    """Read the raw strings from the kernel module sysfs interface.

    :returns: raw strings containing all bytes from the sensor memory
    :rtype: str
    :raises NoSensorFoundError: if the sensor could not be found
    :raises SensorNotReadyError: if the sensor is not ready yet
    """
    try:
        with open(self.sensorpath, "r") as sysfs_file:
            lines = sysfs_file.readlines()
    except IOError:
        raise NoSensorFoundError(self.type_name, self.id)
    # The first line ends in "YES" once the sensor's CRC check has passed.
    if not lines[0].strip().endswith("YES"):
        raise SensorNotReadyError(self)
    return lines
def _prep_callable_bed(in_file, work_dir, stats, data):
    """Sort and merge callable BED regions to prevent SV double counting.

    NOTE(review): the shell command below is rendered with
    ``cmd.format(**locals())``, so the local variable names (``gsort``,
    ``in_file``, ``fai_file``, ``stats``, ``tx_out_file``) are part of the
    command template -- do not rename them.

    :param in_file: input BED file of callable regions
    :param work_dir: directory for the merged output
    :param stats: dict; ``stats["merge_size"]`` is the merge distance in bases
    :param data: bcbio sample data dict (provides config / reference file)
    :return: path to the bgzipped, tabix-indexed merged BED
    """
    # Output name: <input-basename>-merge.bed.gz inside work_dir.
    out_file = os.path.join(work_dir, "%s-merge.bed.gz" % utils.splitext_plus(os.path.basename(in_file))[0])
    gsort = config_utils.get_program("gsort", data)
    if not utils.file_uptodate(out_file, in_file):
        # Write via a transactional temp file so partial output never lands.
        with file_transaction(data, out_file) as tx_out_file:
            fai_file = ref.fasta_idx(dd.get_ref_file(data))
            # Sort in genome order, merge features within merge_size bases,
            # then bgzip-compress for indexing.
            cmd = ("{gsort} {in_file} {fai_file} | bedtools merge -i - -d {stats[merge_size]} | "
                   "bgzip -c > {tx_out_file}")
            do.run(cmd.format(**locals()), "Prepare SV callable BED regions")
    return vcfutils.bgzip_and_index(out_file, data["config"])
def _clause_formatter ( self , cond ) :
'''Formats conditions
args is a list of [ ' field ' , ' operator ' , ' value ' ]''' | if len ( cond ) == 2 :
cond = ' ' . join ( cond )
return cond
if 'in' in cond [ 1 ] . lower ( ) :
if not isinstance ( cond [ 2 ] , ( tuple , list ) ) :
raise TypeError ( '("{0}") must be of type <type tuple> or <type list>' . format ( cond [ 2 ] ) )
if 'select' not in cond [ 2 ] [ 0 ] . lower ( ) :
cond [ 2 ] = "({0})" . format ( ',' . join ( map ( str , [ "'{0}'" . format ( e ) for e in cond [ 2 ] ] ) ) )
else :
cond [ 2 ] = "({0})" . format ( ',' . join ( map ( str , [ "{0}" . format ( e ) for e in cond [ 2 ] ] ) ) )
cond = " " . join ( cond )
else : # if isinstance ( cond [ 2 ] , str ) :
# var = re . match ( ' ^ @ ( \ w + ) $ ' , cond [ 2 ] )
# else :
# var = None
# if var :
if isinstance ( cond [ 2 ] , str ) and cond [ 2 ] . startswith ( '@' ) :
cond [ 2 ] = "{0}" . format ( cond [ 2 ] )
else :
cond [ 2 ] = "'{0}'" . format ( cond [ 2 ] )
cond = ' ' . join ( cond )
return cond |
def merge_into(self, other):
    """Merge two simple selectors together.

    ``self`` is a selector being extended; ``other`` is the selector for a
    block using ``@extend``. Element tokens must come first and
    pseudo-element tokens last, so the merged selector looks like::

        [element] [misc self tokens] [misc other tokens] [pseudo-element]

    Duplicate tokens are assumed to have been removed earlier, during the
    search for a hinge.
    """
    element_tokens = []
    body_tokens = []
    pseudo_tokens = []
    for token in self.tokens + other.tokens:
        if token in CSS2_PSEUDO_ELEMENTS or token.startswith('::'):
            pseudo_tokens.append(token)
        elif token[0] in BODY_TOKEN_SIGILS:
            body_tokens.append(token)
        else:
            element_tokens.append(token)
    merged_tokens = element_tokens + body_tokens + pseudo_tokens

    # A descendant combinator (' ') yields to anything more specific.
    if other.combinator == self.combinator or self.combinator == ' ':
        combinator = other.combinator
    elif other.combinator == ' ':
        combinator = self.combinator
    else:
        raise ValueError("Don't know how to merge conflicting combinators: "
                         "{0!r} and {1!r}".format(self, other))
    return type(self)(combinator, merged_tokens)
def install_config_kibana(self):
    """Interactively install kibana, then configure and autostart it.

    Each step runs only if the user confirms the corresponding prompt.
    """
    steps = (
        ("Download and install kibana", self.kibana_install),
        ("Configure and autostart kibana", self.kibana_config),
    )
    for prompt, action in steps:
        if self.prompt_check(prompt):
            action()
def read_in_survey_parameters(log, pathToSettingsFile):
    """Read the survey-simulation parameters from a YAML settings file.

    **Key Arguments:**
        - ``log`` -- logger
        - ``pathToSettingsFile`` -- path to the settings file for the simulation

    **Return:**
        - a tuple of settings lists and dictionaries
    """
    import yaml

    # Read and parse the settings file once. The previous implementation
    # read the file twice (identically), used Python-2-only syntax
    # (``file()``, ``except Exception, e``, ``print str(e)``) and swallowed
    # parse errors, which then resurfaced as a confusing NameError below.
    try:
        with open(pathToSettingsFile, 'r') as stream:
            # safe_load: the settings file is plain data (no python tags)
            # and this satisfies modern PyYAML's Loader requirement.
            thisDict = yaml.safe_load(stream)
    except Exception as e:
        print(str(e))
        raise

    allSettings = thisDict
    programSettings = thisDict["Program Settings"]
    limitingMags = thisDict["Limiting Magnitudes"]
    sampleNumber = thisDict["Simulation Sample"]
    peakMagnitudeDistributions = thisDict["SN Absolute Peak-Magnitude Distributions"]
    relativeRatesSet = thisDict["Relative Rate Set to Use"]
    relativeSNRates = thisDict["Relative SN Rates"][relativeRatesSet]
    lowerReshiftLimit = thisDict["Lower Redshift Limit"]
    upperRedshiftLimit = thisDict["Upper Redshift Limit"]
    redshiftResolution = thisDict["Redshift Resolution"]
    extinctionSettings = thisDict["Extinctions"]
    extinctionType = extinctionSettings["constant or random"]
    extinctionConstant = extinctionSettings["constant E(b-v)"]
    hostExtinctionDistributions = extinctionSettings["host"]
    galacticExtinctionDistribution = extinctionSettings["galactic"]
    surveyCadenceSettings = thisDict["Survey Cadence"]
    explosionDaysFromSettings = thisDict["Explosion Days"]
    extendLightCurveTail = thisDict["Extend lightcurve tail?"]
    snLightCurves = thisDict["Lightcurves"]
    lightCurvePolyOrder = thisDict["Order of polynomial used to fits lightcurves"]
    surveyArea = thisDict["Sky Area of the Survey (square degrees)"]
    CCSNRateFraction = thisDict["CCSN Progenitor Population Fraction of IMF"]
    transientToCCSNRateFraction = thisDict["Transient to CCSN Ratio"]
    extraSurveyConstraints = thisDict["Extra Survey Constraints"]
    restFrameFilter = thisDict["Rest Frame Filter for K-corrections"]
    kCorrectionTemporalResolution = thisDict["K-correction temporal resolution (days)"]
    kCorPolyOrder = thisDict["Order of polynomial used to fits k-corrections"]
    kCorMinimumDataPoints = thisDict["Minimum number of datapoints used to generate k-correction curve"]
    logLevel = thisDict["Level of logging required"]

    # NOTE: tuple order is part of the public contract -- do not reorder.
    return (allSettings, programSettings, limitingMags, sampleNumber,
            peakMagnitudeDistributions, explosionDaysFromSettings,
            extendLightCurveTail, relativeSNRates, lowerReshiftLimit,
            upperRedshiftLimit, redshiftResolution, restFrameFilter,
            kCorrectionTemporalResolution, kCorPolyOrder,
            kCorMinimumDataPoints, extinctionType, extinctionConstant,
            hostExtinctionDistributions, galacticExtinctionDistribution,
            surveyCadenceSettings, snLightCurves, surveyArea,
            CCSNRateFraction, transientToCCSNRateFraction,
            extraSurveyConstraints, lightCurvePolyOrder, logLevel)
def inspect_model(self, model):
    """Inspect a single model and return the URLs it references.

    Scans the model's URL fields, file/image fields and HTML text fields,
    reports every match via ``self.show_match`` and returns the collected
    URL values.

    :param model: a Django model class
    :return: list of URL strings found in the model's rows
    """
    # See which interesting fields the model holds.
    url_fields = sorted(f for f in model._meta.fields
                        if isinstance(f, (PluginUrlField, models.URLField)))
    file_fields = sorted(f for f in model._meta.fields
                         if isinstance(f, (PluginImageField, models.FileField)))
    html_fields = sorted(f for f in model._meta.fields
                         if isinstance(f, (models.TextField, PluginHtmlField)))
    all_fields = [f.name for f in (file_fields + html_fields + url_fields)]
    if not all_fields:
        return []
    if model.__name__ in self.exclude:
        self.stderr.write("Skipping {0} ({1})\n".format(model.__name__, ", ".join(all_fields)))
        return []
    # Consistency fix: use self.stderr (management-command stream) here as
    # well; this previously wrote directly to sys.stderr.
    self.stderr.write("Inspecting {0} ({1})\n".format(model.__name__, ", ".join(all_fields)))

    # Only fetch rows where at least one interesting field is non-null.
    q_notnull = reduce(operator.or_, (Q(**{"{0}__isnull".format(f): False}) for f in all_fields))
    qs = model.objects.filter(q_notnull).order_by('pk')
    urls = []
    for obj in qs:
        # HTML fields need proper html5lib parsing.
        for field in html_fields:
            value = getattr(obj, field.name)
            if value:
                html_images = self.extract_html_urls(value)
                urls += html_images
                for image in html_images:
                    self.show_match(obj, image)
        # Picture fields take the URL from the storage class.
        for field in file_fields:
            value = getattr(obj, field.name)
            if value:
                value = unquote_utf8(value.url)
                urls.append(value)
                self.show_match(obj, value)
        # URL fields can be read directly.
        for field in url_fields:
            value = getattr(obj, field.name)
            if value:
                if isinstance(value, six.text_type):
                    value = force_text(value)
                else:
                    # AnyUrlValue
                    value = value.to_db_value()
                urls.append(value)
                self.show_match(obj, value)
    return urls
def xi_eq(x, kappa, chi_eff, q):
    """The roots of this equation determine the orbital radius at the onset
    of NS tidal disruption in a nonprecessing NS-BH binary
    [(7) in Foucart PRD 86, 124007 (2012)].

    Parameters
    ----------
    x : float
        orbital separation in units of the NS radius
    kappa : float
        the BH mass divided by the NS radius
    chi_eff : float
        the BH dimensionless spin parameter
    q : float
        the binary mass ratio (BH mass / NS mass)

    Returns
    -------
    float
        x**3 * (x**2 - 3*kappa*x + 2*chi_eff*kappa*sqrt(kappa*x))
        - 3*q * (x**2 - 2*kappa*x + (chi_eff*kappa)**2)
    """
    tidal_term = x ** 2 - 3 * kappa * x + 2 * chi_eff * kappa * math.sqrt(kappa * x)
    isco_term = x ** 2 - 2 * kappa * x + (chi_eff * kappa) ** 2
    return x ** 3 * tidal_term - 3 * q * isco_term
def _mapper(self):
    """Map payment attributes to their specific types.

    :see: func:`~APIResource._mapper`
    """
    mapping = {}
    mapping['card'] = Payment.Card
    mapping['customer'] = Payment.Customer
    mapping['hosted_payment'] = Payment.HostedPayment
    mapping['notification'] = Payment.Notification
    mapping['failure'] = Payment.Failure
    return mapping
def setup_launch_parser(self, parser):
    """Set up the given parser for the launch command.

    :param parser: the argument parser to set up
    :type parser: :class:`argparse.ArgumentParser`
    :returns: None
    :rtype: None
    :raises: None
    """
    parser.set_defaults(func=self.launch)
    addon_help = "The jukebox addon to launch. The addon should be a standalone plugin."
    parser.add_argument("addon", help=addon_help)
def _MultiStream ( cls , fds ) :
"""Method overriden by subclasses to optimize the MultiStream behavior .""" | for fd in fds :
fd . Seek ( 0 )
while True :
chunk = fd . Read ( cls . MULTI_STREAM_CHUNK_SIZE )
if not chunk :
break
yield fd , chunk , None |
def get_time_diff_days(start_txt, end_txt):
    '''Return the (possibly fractional) number of days between two date
    strings, rounded to two decimals, or None if either bound is missing.'''
    if start_txt is None or end_txt is None:
        return None
    start_dt = parser.parse(start_txt)
    end_dt = parser.parse(end_txt)
    day_seconds = float(60 * 60 * 24)
    delta_days = (end_dt - start_dt).total_seconds() / day_seconds
    # Round to two decimal places via string formatting.
    return float('%.2f' % delta_days)
def executorLost(self, driver, executorId, agentId, status):
    """Invoked when an executor has exited/terminated abnormally."""
    lostId = executorId.get('value', None)
    log.warning("Executor '%s' reported lost with status '%s'.", lostId, status)
    self._handleFailedExecutor(agentId.value, lostId)
def get_system_uptime_input_rbridge_id(self, **kwargs):
    """Auto Generated Code

    Builds the <get_system_uptime>/<input>/<rbridge-id> request element and
    hands it to the callback (``callback`` kwarg, default
    ``self._callback``).
    """
    root = ET.Element("get_system_uptime")
    input_el = ET.SubElement(root, "input")
    rbridge_el = ET.SubElement(input_el, "rbridge-id")
    rbridge_el.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def crab_factory(**kwargs):
    '''Factory that generates a CRAB client.

    The ``wsdl`` parameter is consumed here; all other parameters are
    passed on to the client.

    :param wsdl: `Optional.` Allows overriding the default CRAB wsdl url.
    :param proxy: `Optional.` A dictionary of proxy information that is
        passed to the underlying :class:`suds.client.Client`
    :rtype: :class:`suds.client.Client`
    '''
    wsdl = kwargs.pop('wsdl', "http://crab.agiv.be/wscrab/wscrab.svc?wsdl")
    log.info('Creating CRAB client with wsdl: %s', wsdl)
    return Client(wsdl, **kwargs)
def headloss_gen(Area, Vel, PerimWetted, Length, KMinor, Nu, PipeRough):
    """Return the total head loss in the general case.

    Total head loss is the sum of minor (expansion) and major (friction)
    losses; this equation applies to both laminar and turbulent flows.
    Inputs are validated by the functions this function calls.
    """
    minor_loss = headloss_exp_general(Vel, KMinor).magnitude
    major_loss = headloss_fric_general(Area, PerimWetted, Vel, Length, Nu, PipeRough).magnitude
    return minor_loss + major_loss
def _grouped(input_type, output_type, base_class, output_type_method):
    """Define a user-defined function that is applied per group.

    Parameters
    ----------
    input_type : List[ibis.expr.datatypes.DataType]
        A list of the types found in :mod:`~ibis.expr.datatypes`. The
        length of this list must match the number of arguments to the
        function. Variadic arguments are not yet supported.
    output_type : ibis.expr.datatypes.DataType
        The return type of the function.
    base_class : Type[T]
        The base class of the generated Node
    output_type_method : Callable
        A callable that determines the method to call to get the expression
        type of the UDF

    See Also
    --------
    ibis.pandas.udf.reduction
    ibis.pandas.udf.analytic
    """
    def wrapper(func):
        # Validate func's signature against the declared input types once.
        funcsig = valid_function_signature(input_type, func)
        # Dynamically create a Node subclass named after the wrapped
        # function, carrying its type signature and output type.
        UDAFNode = type(func.__name__, (base_class,), {'signature': sig.TypeSignature.from_dtypes(input_type), 'output_type': output_type_method(output_type), })

        # An execution rule for a simple aggregate node
        @execute_node.register(UDAFNode, *udf_signature(input_type, pin=None, klass=pd.Series))
        def execute_udaf_node(op, *args, **kwargs):
            args, kwargs = arguments_from_signature(funcsig, *args, **kwargs)
            return func(*args, **kwargs)

        # An execution rule for a grouped aggregation node. This
        # includes aggregates applied over a window.
        nargs = len(input_type)
        # One registration per argument position "pinned" as SeriesGroupBy,
        # so any single grouped argument is matched.
        group_by_signatures = [udf_signature(input_type, pin=pin, klass=SeriesGroupBy) for pin in range(nargs)]

        # toolz.compose stacks all the register() decorators so one function
        # body handles every grouped signature.
        @toolz.compose(*(execute_node.register(UDAFNode, *types) for types in group_by_signatures))
        def execute_udaf_node_groupby(op, *args, **kwargs):
            # construct a generator that yields the next group of data
            # for every argument excluding the first (pandas performs
            # the iteration for the first argument) for each argument
            # that is a SeriesGroupBy.
            # If the argument is not a SeriesGroupBy then keep
            # repeating it until all groups are exhausted.
            aggcontext = kwargs.pop('aggcontext', None)
            assert aggcontext is not None, 'aggcontext is None'
            iters = ((data for _, data in arg) if isinstance(arg, SeriesGroupBy) else itertools.repeat(arg) for arg in args[1:])
            funcsig = signature(func)

            def aggregator(first, *rest, **kwargs):
                # map(next, *rest) gets the inputs for the next group
                # TODO: might be inefficient to do this on every call
                args, kwargs = arguments_from_signature(funcsig, first, *map(next, rest), **kwargs)
                return func(*args, **kwargs)

            # aggcontext drives the actual pandas groupby/window aggregation.
            result = aggcontext.agg(args[0], aggregator, *iters, **kwargs)
            return result

        # The user-facing wrapper: building a UDAFNode yields an ibis expr.
        @functools.wraps(func)
        def wrapped(*args):
            return UDAFNode(*args).to_expr()
        return wrapped
    return wrapper
def iteritems(self, pipe=None):
    """Yield the dictionary's ``(key, value)`` pairs.

    Values present in the local cache take precedence over the values
    fetched from redis via ``self._data``.
    """
    active_pipe = self.redis if pipe is None else pipe
    for key, value in self._data(active_pipe).items():
        yield key, self.cache.get(key, value)
def write(filename, headers, dcols, data, headerlines=None, header_char='H',
          sldir='.', sep=' ', trajectory=False, download=False):
    '''Write column data to an ASCII file.

    The attribute name at position i in ``dcols`` is associated with the
    column data stored under that name in ``data``; the number of data
    columns must equal the number of data attributes and all columns must
    have the same length.

    Parameters
    ----------
    filename : string
        The file where this data will be written.
    headers : list
        Header strings; for trajectory files, strings containing header
        attributes and their associated values.
    dcols : list
        Data attribute (column) names.
    data : dict
        Mapping of column name -> list (or numpy array) of values.
    headerlines : list, optional
        Additional header strings, only used for trajectory files.
        The default is None (treated as an empty list).
    header_char : character, optional
        The character that indicates a header line. The default is 'H'.
    sldir : string, optional
        Directory the file will be written to. The default is '.'.
    sep : string, optional
        Separator between data columns. The default is ' '.
    trajectory : boolean, optional
        Whether a trajectory-type file is written. The default is False.
    download : boolean, optional
        If using an IPython notebook, return a download link for the
        written file. The default is False.

    Fixes over the previous version: a leftover debug ``print`` was
    removed; the data rows are now read consistently by column name
    (the old code mixed dict-style and list-style indexing and clobbered
    ``dcols`` before using it, so it could not run); the mutable default
    argument for ``headerlines`` was replaced with None.
    '''
    headerlines = [] if headerlines is None else headerlines

    if sldir.endswith(os.sep):
        filename = str(sldir) + str(filename)
    else:
        filename = str(sldir) + os.sep + str(filename)

    # Interactive overwrite guard.
    if os.path.exists(filename):
        print('Warning this method will overwrite ' + filename)
        print('Would you like to continue? (y)es or (n)no?')
        s = input('--> ')
        if s in ('Y', 'y', 'Yes', 'yes'):
            print('Yes selected')
            print('Continuing as normal')
        else:
            print('No Selected')
            print('Returning None')
            return None

    if len(data) != len(dcols):
        print('The number of data columns does not equal the number of Data attributes')
        print('returning none')
        return None

    if trajectory:
        sep = ' '

    # Format the file headers.
    header_out = []
    for h in headers:
        if not trajectory:
            header_out.append(header_char + ' ' + h + '\n')
        else:
            header_out.append(h + '\n')

    # Column widths: the widest of the attribute name and any value.
    widths = []
    for name in dcols:
        width = len(name)
        for v in data[name]:
            width = max(width, len(str(v)))
        widths.append(width)

    # Column-name line (padded for plain files, '#'-prefixed for trajectories).
    col_line = '#' if trajectory else ''
    for i, name in enumerate(dcols):
        if not trajectory:
            col_line += sep + name.ljust(widths[i])
        else:
            col_line += ' ' + name
    col_line += '\n'

    # Data lines, each value left-justified to its column width.
    nrows = len(data[dcols[0]]) if dcols else 0
    lines = []
    for i in range(nrows):
        row = ''
        for j, name in enumerate(dcols):
            row += sep + str(data[name][i]).ljust(widths[j])
        lines.append(row + '\n')

    with open(filename, 'w') as f:
        if not trajectory:
            for h in header_out:
                f.write(h)
            f.write(col_line)
        else:
            f.write(col_line)
            for hl in headerlines:
                f.write('# ' + hl + '\n')
            for h in header_out:
                f.write(h)
        for line in lines:
            f.write(line)

    if download:
        from IPython.display import FileLink
        return FileLink(filename)
    return None
async def create(source_id: str):
    """Create a connection object; represents a single endpoint and can be
    used for sending and receiving credentials and proofs.

    :param source_id: Institution's unique ID for the connection
    :return: connection object

    Example:
        connection = await Connection.create(source_id)
    """
    encoded_id = c_char_p(source_id.encode('utf-8'))
    return await Connection._create("vcx_connection_create",
                                    (source_id,),
                                    (encoded_id,))
def resample(self, sampling_rate=None, variables=None, force_dense=False,
             in_place=False, kind='linear'):
    '''Resample all dense variables (and optionally, sparse ones) to the
    specified sampling rate.

    Args:
        sampling_rate (int, float): Target sampling rate (in Hz). If None,
            uses the instance sampling rate.
        variables (list): Optional list of Variables to resample. If None,
            all variables are resampled.
        force_dense (bool): if True, all sparse variables will be forced
            to dense.
        in_place (bool): When True, all variables are overwritten in-place.
            When False, returns resampled versions of all variables.
        kind (str): Argument to pass to scipy's interp1d; indicates the
            kind of interpolation approach to use. See interp1d docs for
            valid values.
    '''
    target_rate = sampling_rate or self.sampling_rate
    resampled = {}
    for name, var in self.variables.items():
        if variables is not None and name not in variables:
            continue
        if isinstance(var, SparseRunVariable):
            # Sparse variables are only converted when forced to dense
            # (and only if their values are numeric).
            if force_dense and is_numeric_dtype(var.values):
                resampled[name] = var.to_dense(target_rate)
        else:
            # resample() returns None when in_place; no update needed then.
            new_var = var.resample(target_rate, inplace=in_place, kind=kind)
            if not in_place:
                resampled[name] = new_var
    if in_place:
        for name, var in resampled.items():
            self.variables[name] = var
        self.sampling_rate = target_rate
        return None
    return resampled
def add_service(self, zeroconf, srv_type, srv_name):
    """Handle a newly detected Zeroconf client.

    Return True if the zeroconf client is a Glances server.
    Note: the return code will never be used.
    """
    if srv_type != zeroconf_type:
        return False
    logger.debug("Check new Zeroconf server: %s / %s" % (srv_type, srv_name))
    info = zeroconf.get_service_info(srv_type, srv_name)
    if not info:
        logger.warning("New Glances server detected, but Zeroconf info failed to be grabbed")
        return True
    address = socket.inet_ntoa(info.address)
    port = info.port
    # Add server to the global dict.
    self.servers.add_server(srv_name, address, port)
    logger.info("New Glances server detected (%s from %s:%s)" % (srv_name, address, port))
    return True
def get_mac_address_table_input_request_type_get_next_request_last_mac_address_details_last_mac_type(self, **kwargs):
    """Auto Generated Code

    Builds the nested <get_mac_address_table> request element down to
    <last-mac-type> and hands it to the callback (``callback`` kwarg,
    default ``self._callback``).
    """
    root = ET.Element("get_mac_address_table")
    input_el = ET.SubElement(root, "input")
    request_type_el = ET.SubElement(input_el, "request-type")
    next_request_el = ET.SubElement(request_type_el, "get-next-request")
    details_el = ET.SubElement(next_request_el, "last-mac-address-details")
    mac_type_el = ET.SubElement(details_el, "last-mac-type")
    mac_type_el.text = kwargs.pop('last_mac_type')
    callback = kwargs.pop('callback', self._callback)
    return callback(root)
def _get_zoom_mat(sw: float, sh: float, c: float, r: float) -> AffineMatrix:
    "Affine matrix scaling width/height by `sw`/`sh`, focused on col `c`, row `r`."
    scale_row_x = [sw, 0, c]
    scale_row_y = [0, sh, r]
    homogeneous = [0, 0, 1.]
    return [scale_row_x, scale_row_y, homogeneous]
def decodeTagAttributes(self, text):
    """Parse an HTML-ish attribute string into a ``{name: value}`` dict.

    Values may appear in any of the pattern's four quoting alternatives;
    runs of whitespace inside a value are collapsed to single spaces and
    character references are decoded.
    """
    attribs = {}
    if text.strip() == u'':
        return attribs
    scanner = _attributePat.scanner(text)
    match = scanner.search()
    while match:
        groups = match.groups()
        key = groups[0]
        # The value is whichever of the four alternatives matched.
        raw = next((g for g in groups[1:] if g), None)
        if raw:
            raw = _space.sub(u' ', raw).strip()
        else:
            raw = ''
        attribs[key] = self.decodeCharReferences(raw)
        match = scanner.search()
    return attribs
def postal_code(random=random, *args, **kwargs):
    """Produce something that vaguely resembles a postal code.

    >>> mock_random.seed(0)
    >>> postal_code(random=mock_random)
    'b0b 0c0'
    >>> postal_code(random=mock_random, capitalize=True)
    'E0E 0F0'
    >>> postal_code(random=mock_random, slugify=True)
    'h0h-0i0'
    """
    templates = [
        "{letter}{number}{letter} {other_number}{other_letter}{other_number}",
        "{number}{other_number}{number}{number}{other_number}",
        "{number}{letter}{number}{other_number}{other_letter}",
    ]
    template = random.choice(templates)
    return template.format(
        number=number(random=random),
        other_number=number(random=random),
        letter=letter(random=random),
        other_letter=letter(random=random),
    )
def overlay_gateway_site_name(self, **kwargs):
    """Build the XML config for an overlay-gateway site name and hand it to the callback.

    Expects ``kwargs['name']`` (used both as the gateway name key and the
    site name) and optionally ``kwargs['callback']`` (defaults to
    ``self._callback``).
    """
    config = ET.Element("config")
    overlay_gateway = ET.SubElement(
        config, "overlay-gateway",
        xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # Bug fix: the original popped 'name' twice; the second pop raised
    # KeyError because the key had already been consumed. Pop once, reuse.
    name_value = kwargs.pop('name')
    name_key = ET.SubElement(overlay_gateway, "name")
    name_key.text = name_value
    site = ET.SubElement(overlay_gateway, "site")
    name = ET.SubElement(site, "name")
    name.text = name_value
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def verifyRefimage(refimage):
    """Verify that the value of refimage specified by the user points to an
    extension with a proper WCS defined.

    It starts by making sure an extension gets specified by the user when
    using a MEF file. The final check comes by looking for a CD matrix in
    the WCS object itself. If either test fails, it returns False.
    """
    valid = True
    # A blank/empty refimage means "nothing to verify" and is accepted.
    if is_blank(refimage):
        valid = True
        return valid
    # Split "rootname[extension]" into filename and extension parts.
    refroot, extroot = fileutil.parseFilename(refimage)
    if not os.path.exists(refroot):
        valid = False
        return valid
    # If a MEF has been specified, make sure extension contains a valid WCS.
    # NOTE(review): 'valid' is necessarily True here, so this guard is
    # redundant; kept as-is for identical behavior.
    if valid:
        if extroot is None:
            # No explicit extension: search the file for one with a usable WCS.
            extn = findWCSExtn(refimage)
            if extn is None:
                valid = False
            else:
                valid = True
        else:
            # Explicit extension given: check for CD matrix in the WCS object.
            refwcs = wcsutil.HSTWCS(refimage)
            if not refwcs.wcs.has_cd():
                valid = False
            else:
                valid = True
            del refwcs
    return valid
def nic_a_c(msg):
    """Obtain NICa/c, navigation integrity category supplements a and c.

    Args:
        msg (string): 28 bytes hexadecimal message string

    Returns:
        (int, int): NICa and NICc number (0 or 1)
    """
    if typecode(msg) != 31:
        raise RuntimeError(
            "%s: Not a status operation message, expecting TC = 31" % msg)
    bits = common.hex2bin(msg)
    # NICa is bit 76 (index 75), NICc is bit 52 (index 51) of the message.
    return int(bits[75]), int(bits[51])
def add(self, item, overflow_policy=OVERFLOW_POLICY_OVERWRITE):
    """Add the specified item to the tail of the Ringbuffer.

    If there is no space in the Ringbuffer, the action is determined by the
    overflow policy: :const:`OVERFLOW_POLICY_OVERWRITE` or
    :const:`OVERFLOW_POLICY_FAIL`.

    :param item: (object), the specified item to be added.
    :param overflow_policy: (int), the OverflowPolicy to be used when there
        is no space (optional).
    :return: (long), the sequenceId of the added item, or -1 if the add failed.
    """
    # Serialize the item and invoke the ringbuffer "add" codec remotely.
    return self._encode_invoke(ringbuffer_add_codec, value=self._to_data(item), overflow_policy=overflow_policy)
def google(rest):
    "Look up a phrase on google and return the first hit as 'link - title'."
    API_URL = 'https://www.googleapis.com/customsearch/v1?'
    try:
        key = pmxbot.config['Google API key']
    except KeyError:
        return "Configure 'Google API key' in config"
    # Use a custom search that searches everything normally
    # http://stackoverflow.com/a/11206266/70170
    custom_search = '004862762669074674786:hddvfu0gyg0'
    params = dict(key=key, cx=custom_search, q=rest.strip(),)
    url = API_URL + urllib.parse.urlencode(params)
    resp = requests.get(url)
    resp.raise_for_status()
    results = resp.json()
    # Take the first result; raises if 'items' is missing or empty.
    hit1 = next(iter(results['items']))
    return ' - '.join((urllib.parse.unquote(hit1['link']), hit1['title'],))
def get_coordinate_variables(ds):
    '''Return the names of all coordinate variables in the dataset.

    A coordinate variable is a netCDF variable with exactly one dimension,
    and the name of that dimension equals the variable name (CF §1.2
    Terminology, e.g. ``time(time)``).

    :param netCDF4.Dataset ds: An open netCDF dataset
    '''
    return [
        dim for dim in ds.dimensions
        if dim in ds.variables and ds.variables[dim].dimensions == (dim,)
    ]
def get_releases(data, **kwargs):
    """Get all releases from pypi meta data, newest first.

    :param data: dict, meta data
    :return: list, str releases
    """
    if "versions" not in data:
        return []
    version_keys = data["versions"].keys()
    return sorted(version_keys, key=lambda v: parse(v), reverse=True)
def add_ring(self, ring):
    """Append ``ring`` to ``_rings`` unless already present or not a RingDing0."""
    if ring in self._rings:
        return
    if isinstance(ring, RingDing0):
        self._rings.append(ring)
def get_persistent_boot_device(self):
    """Get current persistent boot device set for the host.

    :returns: persistent boot device for the system
    :raises: IloError, on an error from iLO.
    """
    boot_string = None
    # Both the configured order and the available sources must be present.
    if not self.persistent_boot_config_order or not self.boot_sources:
        msg = ('Boot sources or persistent boot config order not found')
        LOG.debug(msg)
        raise exception.IloError(msg)
    # The first entry in the configured order is the preferred device.
    preferred_boot_device = self.persistent_boot_config_order[0]
    for boot_source in self.boot_sources:
        if ((boot_source.get("StructuredBootString") is not None) and (preferred_boot_device == boot_source.get("StructuredBootString"))):
            boot_string = boot_source["BootString"]
            break
    else:
        # for/else: no boot source matched the preferred device.
        msg = (('Persistent boot device failed, as no matched boot ' 'sources found for device: %(persistent_boot_device)s') % {'persistent_boot_device': preferred_boot_device})
        LOG.debug(msg)
        raise exception.IloError(msg)
    # Map the free-form boot string back to a sushy boot-source target.
    for key, value in BOOT_SOURCE_TARGET_TO_PARTIAL_STRING_MAP.items():
        for val in value:
            if val in boot_string:
                return key
    return sushy.BOOT_SOURCE_TARGET_NONE
def get_uri(self):
    """Return a URI built from the host's address (not its name) and all of
    its attributes except the name.

    :rtype: str
    :return: A URI.
    """
    url = Url()
    url.protocol = self.get_protocol()
    url.hostname = self.get_address()
    url.port = self.get_tcp_port()
    # Only str/list attribute values are carried over as URI variables.
    url.vars = {
        key: to_list(value)
        for key, value in list(self.get_all().items())
        if isinstance(value, (str, list))
    }
    if self.account:
        url.username = self.account.get_name()
        url.password1 = self.account.get_password()
        url.password2 = self.account.authorization_password
    return str(url)
def get_volumes_for_instance(self, arg, device=None):
    """Return all EC2 Volume objects attached to the instance named/ID'd ``arg``.

    May specify ``device`` to limit to the (single) volume attached as that
    device.
    """
    instance = self.get(arg)
    volume_filters = {'attachment.instance-id': instance.id}
    if device is not None:
        volume_filters['attachment.device'] = device
    return self.get_all_volumes(filters=volume_filters)
def do_ranges_intersect(begin, end, old_begin, old_end):
    """Determine if the two given memory address ranges intersect.

    @type  begin: int
    @param begin: Start address of the first range.
    @type  end: int
    @param end: End address of the first range.
    @type  old_begin: int
    @param old_begin: Start address of the second range.
    @type  old_end: int
    @param old_end: End address of the second range.
    @rtype:  bool
    @return: C{True} if the two ranges intersect, C{False} otherwise.
    """
    # Intentionally checks endpoint containment in both directions rather
    # than the usual "begin < old_end and old_begin < end", so that a
    # zero-length range inside the other still counts as intersecting.
    first_starts_inside = old_begin <= begin < old_end
    first_ends_inside = old_begin < end <= old_end
    second_starts_inside = begin <= old_begin < end
    second_ends_inside = begin < old_end <= end
    return (first_starts_inside or first_ends_inside or
            second_starts_inside or second_ends_inside)
def round_to(dt, hour, minute, second, mode="round"):
    """Round ``dt`` to the nearest datetime with the given hour, minute, second.

    :param mode: 'floor', 'ceiling' or 'round' (case-insensitive).

    .. versionadded:: 0.0.5

    **中文文档**

    将给定时间对齐到最近的一个指定了小时, 分钟, 秒的时间上。
    """
    normalized_mode = mode.lower()
    if normalized_mode not in _round_to_options:
        raise ValueError(
            "'mode' has to be one of %r!" % list(_round_to_options.keys()))
    rounder = _round_to_options[normalized_mode]
    return rounder(dt, hour, minute, second)
def execute_flow(self, custom_command="", is_config=False):
    """Execute flow which runs custom command(s) on the device.

    :param custom_command: the command (str) or commands (tuple/list)
        to execute on the device
    :param is_config: if True then run commands in configuration mode
    :return: command execution output, joined with newlines
    """
    responses = []
    # Normalize the input to a list of command strings.
    if isinstance(custom_command, str):
        commands = [custom_command]
    elif isinstance(custom_command, tuple):
        commands = list(custom_command)
    else:
        # Assumed to already be list-like.
        commands = custom_command
    # Pick the CLI mode; fail loudly if the handler was not configured.
    if is_config:
        mode = self._cli_handler.config_mode
        if not mode:
            raise Exception(self.__class__.__name__, "CliHandler configuration is missing. Config Mode has to be defined")
    else:
        mode = self._cli_handler.enable_mode
        if not mode:
            raise Exception(self.__class__.__name__, "CliHandler configuration is missing. Enable Mode has to be defined")
    # Run all commands in a single CLI session.
    with self._cli_handler.get_cli_service(mode) as session:
        for cmd in commands:
            responses.append(session.send_command(command=cmd))
    return '\n'.join(responses)
def get_rows(self, match, fields=None, limit=10, sampling=None):
    """Return raw rows that match the given query.

    :arg match: query to be run against Kibana log messages
        (e.g. ``{"@message": "Foo Bar DB queries"}``)
    :type fields: list[str] or None
    :arg limit: the number of results (defaults to 10)
    :type sampling: int or None
    :arg sampling: percentage of results to be returned (0, 100)
    """
    return self._search({"match": match}, fields, limit, sampling)
def handleEvent(self, eventObj):
    """Process one event; should be called every time through the main loop.

    Returns:
        False - if the event does not start the animation (hidden, disabled,
                not a mouse-down, outside the rect, or already playing).
        True - if the user clicks the animation to start it playing.
    """
    # Bug fix: the original used a bare ``return`` (i.e. None) for the
    # invisible case, while the documented contract is to return False.
    if not self.visible:
        return False
    if not self.isEnabled:
        return False
    if eventObj.type != MOUSEBUTTONDOWN:
        # The animation only cares about a mouse down event.
        return False
    if not self.rect.collidepoint(eventObj.pos):
        # Clicked outside of the animation.
        return False
    if self.state == PygAnimation.PLAYING:
        # If playing, ignore the click.
        return False
    return True
def search(self, scope, search, **kwargs):
    """Search GitLab resources matching the provided string.

    Args:
        scope (str): Scope of the search
        search (str): Search string
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabSearchError: If the server failed to perform the request

    Returns:
        GitlabList: A list of dicts describing the resources found.
    """
    payload = {'scope': scope, 'search': search}
    return self.http_list('/search', query_data=payload, **kwargs)
def get_function_by_path(dotted_function_module_path):
    """Return the function for a dotted path (e.g. 'my_app.my_module.my_function')."""
    # Split the trailing attribute name off the module path.
    module_path, function_name = dotted_function_module_path.rsplit('.', 1)
    return getattr(import_module(module_path), function_name)
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
    """Like :meth:`~flask.Flask.add_url_rule` but for a blueprint.

    The endpoint for the :func:`url_for` function is prefixed with the name
    of the blueprint. Overridden to allow dots in endpoint names.
    """
    self.record(
        lambda state: state.add_url_rule(
            rule, endpoint, view_func, register_with_babel=False, **options))
def refresh_modules(self, module_string=None, exact=True):
    """Update modules.

    If module_string is None all modules are refreshed.
    If module_string is given then modules with the exact name, or those
    starting with the given string (depending on the ``exact`` parameter),
    will be refreshed. If a module is an i3status one then we refresh
    i3status.

    To prevent abuse, we rate limit this function to 100ms for full
    refreshes.
    """
    if not module_string:
        # Full refresh: only allowed once per 100ms.
        if time.time() > (self.last_refresh_ts + 0.1):
            self.last_refresh_ts = time.time()
        else:
            # rate limiting
            return
    update_i3status = False
    for name, module in self.output_modules.items():
        # Match everything (None), the exact name, or the prefix.
        if (module_string is None or (exact and name == module_string) or (not exact and name.startswith(module_string))):
            if module["type"] == "py3status":
                if self.config["debug"]:
                    self.log("refresh py3status module {}".format(name))
                module["module"].force_update()
            else:
                if self.config["debug"]:
                    self.log("refresh i3status module {}".format(name))
                # i3status modules are refreshed in one batch below.
                update_i3status = True
    if update_i3status:
        self.i3status_thread.refresh_i3status()
def generate_symbolic_cmd_line_arg(state, max_length=1000):
    """Generate a new symbolic command-line argument string and store it.

    :return: The string reference.
    """
    ref = SimSootValue_StringRef(state.memory.get_new_uuid())
    symbol = StringS("cmd_line_arg", max_length)
    # Constrain the symbolic argument to be non-empty.
    state.solver.add(symbol != StringV(""))
    state.memory.store(ref, symbol)
    return ref
def nodes_callback(self, data):
    """Record tags for each node; coords are discarded (they go into add_coords)."""
    for node_id, tags, _coords in data:
        self.nodes[node_id] = tags
def order_by_json_path(self, json_path, language_code=None, order='asc'):
    """Order the queryset by the value at ``json_path`` inside the
    ``translations`` JSONB field for the given language.

    More about the `#>>` operator and the `json_path` arg syntax:
    https://www.postgresql.org/docs/current/static/functions-json.html
    More about Raw SQL expressions:
    https://docs.djangoproject.com/en/dev/ref/models/expressions/#raw-sql-expressions

    Usage example:
        MyModel.objects.language('en_us').filter(is_active=True).order_by_json_path('title')
    """
    # NOTE(review): when both fallbacks are falsy, get_language_key is called
    # with None -- presumably it resolves a default language; confirm.
    language_code = (language_code or self._language_code or self.get_language_key(language_code))
    # Build a Postgres text-path literal, e.g. '{en_us,title}'.
    json_path = '{%s,%s}' % (language_code, json_path)
    # Our jsonb field is named `translations`.
    raw_sql_expression = RawSQL("translations#>>%s", (json_path,))
    if order == 'desc':
        raw_sql_expression = raw_sql_expression.desc()
    return self.order_by(raw_sql_expression)
def write(self, data):
    """Send raw bytes to the instrument in RECV_CHUNK-sized slices.

    :param data: bytes to be sent to the instrument
    :type data: bytes
    :return: total number of bytes the raw device reported written
    """
    begin, end, size = 0, 0, len(data)
    bytes_sent = 0
    # Bind the parent-class write once, outside the loop.
    raw_write = super(USBRawDevice, self).write
    # NOTE(review): when ``size`` is an exact multiple of RECV_CHUNK this
    # condition runs one extra iteration and writes an empty slice --
    # presumably harmless for this backend, but worth confirming.
    while not end > size:
        begin = end
        end = begin + self.RECV_CHUNK
        bytes_sent += raw_write(data[begin:end])
    return bytes_sent
def write(self, arg):
    """Write a string or bytes object to the buffer; strings are encoded first."""
    payload = arg.encode(self.encoding) if isinstance(arg, str) else arg
    return self._buffer.write(payload)
def remove_columns(self, column_names, inplace=False):
    """Returns an SFrame with one or more columns removed.

    If inplace == False (default) this operation does not modify the
    current SFrame, returning a new SFrame.
    If inplace == True, this operation modifies the current SFrame,
    returning self.

    Parameters
    ----------
    column_names : list or iterable
        A list or iterable of column names.
    inplace : bool, optional. Defaults to False.
        Whether the SFrame is modified in place.

    Returns
    -------
    out : SFrame
        The SFrame with given columns removed.

    Examples
    --------
    >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2': [10, 11, 12]})
    >>> res = sf.remove_columns(['val1', 'val2'])
    """
    column_names = list(column_names)
    # Map column name -> positional index in the current frame.
    existing_columns = dict((k, i) for i, k in enumerate(self.column_names()))
    # Validate all names up front so nothing is removed on partial failure.
    for name in column_names:
        if name not in existing_columns:
            raise KeyError('Cannot find column %s' % name)
    # Delete it going backwards so we don't invalidate indices.
    deletion_indices = sorted(existing_columns[name] for name in column_names)
    if inplace:
        ret = self
    else:
        ret = self.copy()
    for colid in reversed(deletion_indices):
        with cython_context():
            ret.__proxy__.remove_column(colid)
    # Invalidate any cached derived state on the returned frame.
    ret._cache = None
    return ret
def sf(self, lrt):
    """Compute the survival function of a mixture of a chi-squared random
    variable of degree 0 and a scaled chi-squared random variable of degree d.
    """
    _lrt = SP.copy(lrt)
    # Treat statistics below the tolerance as exactly zero.
    _lrt[lrt < self.tol] = 0
    # Only the scaled chi2 component contributes probability mass above zero.
    pv = self.mixture * STATS.chi2.sf(_lrt / self.scale, self.dof)
    return pv
def partition(src, key=None):
    """No relation to :meth:`str.partition`; like :func:`bucketize`, but for
    added convenience returns a tuple of ``(truthy_values, falsy_values)``.

    >>> nonempty, empty = partition(['', '', 'hi', '', 'bye'])
    >>> nonempty
    ['hi', 'bye']

    *key* defaults to :class:`bool`, but can be carefully overridden to
    use any function that returns either ``True`` or ``False``.
    """
    buckets = bucketize(src, key)
    truthy = buckets.get(True, [])
    falsy = buckets.get(False, [])
    return truthy, falsy
def histogram2d(data1, data2, bins=None, *args, **kwargs):
    """Facade function to create 2D histogram using dask."""
    # TODO: currently very unoptimized! for non-dasks
    import dask
    # Derive axis names from the inputs when both expose a ``name``.
    if "axis_names" not in kwargs:
        if hasattr(data1, "name") and hasattr(data2, "name"):
            kwargs["axis_names"] = [data1.name, data2.name]
    # Wrap non-dask inputs as dask arrays split into ~100 chunks.
    # NOTE(review): ``data1.size()`` is called as a method; for numpy arrays
    # ``size`` is an attribute -- presumably inputs here provide size() -- confirm.
    if not hasattr(data1, "dask"):
        data1 = dask.array.from_array(data1, chunks=data1.size() / 100)
    if not hasattr(data2, "dask"):
        data2 = dask.array.from_array(data2, chunks=data2.size() / 100)
    # Stack into an (n, 2) array and delegate to the N-dimensional facade.
    data = dask.array.stack([data1, data2], axis=1)
    kwargs["dim"] = 2
    return histogramdd(data, bins, *args, **kwargs)
def parseString(string, uri=None):
    """Read an XML document provided as a byte string and return an
    :mod:`lxml.etree` document.

    ``string`` cannot be a Unicode string. ``uri`` (base URI) should be
    provided for the calculation of relative URIs.
    """
    parser = _get_xmlparser()
    return etree.fromstring(string, parser=parser, base_url=uri)
def knot_insertion(degree, knotvector, ctrlpts, u, **kwargs):
    """Computes the control points of the rational/non-rational spline after
    knot insertion.

    Part of Algorithm A5.1 of The NURBS Book by Piegl & Tiller, 2nd Edition.

    Keyword Arguments:
        * ``num``: number of knot insertions. *Default: 1*
        * ``s``: multiplicity of the knot. *Default: computed via :func:`.find_multiplicity`*
        * ``span``: knot span. *Default: computed via :func:`.find_span_linear`*

    :param degree: degree
    :type degree: int
    :param knotvector: knot vector
    :type knotvector: list, tuple
    :param ctrlpts: control points
    :type ctrlpts: list
    :param u: knot to be inserted
    :type u: float
    :return: updated control points
    :rtype: list
    """
    # Get keyword arguments
    num = kwargs.get('num', 1)  # number of knot insertions
    s = kwargs.get('s', find_multiplicity(u, knotvector))  # multiplicity
    k = kwargs.get('span', find_span_linear(degree, knotvector, len(ctrlpts), u))  # knot span
    # Initialize variables ('np' here is the textbook's control-point count,
    # not the numpy module)
    np = len(ctrlpts)
    nq = np + num
    # Initialize new control points array (control points may be weighted or not)
    ctrlpts_new = [[] for _ in range(nq)]
    # Initialize a local array of length p + 1
    temp = [[] for _ in range(degree + 1)]
    # Save unaltered control points before and after the affected span
    for i in range(0, k - degree + 1):
        ctrlpts_new[i] = ctrlpts[i]
    for i in range(k - s, np):
        ctrlpts_new[i + num] = ctrlpts[i]
    # Start filling the temporary local array which will be used to update
    # control points during knot insertion
    for i in range(0, degree - s + 1):
        temp[i] = deepcopy(ctrlpts[k - degree + i])
    # Insert knot "num" times
    for j in range(1, num + 1):
        L = k - degree + j
        for i in range(0, degree - j - s + 1):
            alpha = knot_insertion_alpha(u, tuple(knotvector), k, i, L)
            if isinstance(temp[i][0], float):
                # Unweighted points: one flat coordinate list per point.
                temp[i][:] = [alpha * elem2 + (1.0 - alpha) * elem1 for elem1, elem2 in zip(temp[i], temp[i + 1])]
            else:
                # Weighted/nested points: interpolate each coordinate list.
                for idx in range(len(temp[i])):
                    temp[i][idx][:] = [alpha * elem2 + (1.0 - alpha) * elem1 for elem1, elem2 in zip(temp[i][idx], temp[i + 1][idx])]
        ctrlpts_new[L] = deepcopy(temp[0])
        ctrlpts_new[k + num - j - s] = deepcopy(temp[degree - j - s])
    # Load remaining control points
    L = k - degree + num
    for i in range(L + 1, k - s):
        ctrlpts_new[i] = deepcopy(temp[i - L])
    # Return control points after knot insertion
    return ctrlpts_new
def render_pdf_file_to_image_files_pdftoppm_ppm(pdf_file_name, root_output_file_path, res_x=150, res_y=150, extra_args=None):
    """Use the pdftoppm program to render a PDF file to image files.

    The root_output_file_path is prepended to all the output files, which
    have numbers and extensions added. Extra arguments can be passed as a
    list in extra_args. Return the command output.
    """
    if extra_args is None:
        extra_args = []
    if not pdftoppm_executable:
        init_and_test_pdftoppm_executable(prefer_local=False, exit_on_fail=True)
    # Bug fix: resolutions may be ints; subprocess argv entries must be
    # strings, so convert before building the command list.
    res_x = str(res_x)
    res_y = str(res_y)
    if old_pdftoppm_version:
        # Old versions only have -r, not -rx and -ry.
        command = [pdftoppm_executable] + extra_args + ["-r", res_x, pdf_file_name, root_output_file_path]
    else:
        command = [pdftoppm_executable] + extra_args + ["-rx", res_x, "-ry", res_y, pdf_file_name, root_output_file_path]
    comm_output = get_external_subprocess_output(command)
    return comm_output
def jpath_parse(jpath):
    """Parse given JPath into chunks.

    Returns list of dictionaries describing all of the JPath chunks.

    :param str jpath: JPath to be parsed into chunks
    :return: JPath chunks as list of dicts
    :rtype: :py:class:`list`
    :raises JPathException: in case of invalid JPath syntax
    """
    result = []
    breadcrumbs = []
    # Split JPath into chunks based on '.' character.
    chunks = jpath.split('.')
    for chnk in chunks:
        match = RE_JPATH_CHUNK.match(chnk)
        if match:
            res = {}
            # Record whole match.
            res['m'] = chnk
            # Record breadcrumb path (all chunks up to and including this one).
            breadcrumbs.append(chnk)
            res['p'] = '.'.join(breadcrumbs)
            # Handle node name.
            res['n'] = match.group(1)
            # Handle node index (optional, may be omitted).
            if match.group(2):
                res['i'] = match.group(3)
                if str(res['i']) == '#':
                    # '#' means "last element" -> sentinel -1.
                    res['i'] = -1
                elif str(res['i']) == '*':
                    # '*' means "all elements" -> keep the wildcard as-is.
                    pass
                else:
                    # Indices in JPath are 1-based; convert to 0-based.
                    res['i'] = int(res['i']) - 1
            result.append(res)
        else:
            raise JPathException("Invalid JPath chunk '{}'".format(chnk))
    return result
def delete(self, option):
    """Remove ``option`` from the config, if the config exists and holds it."""
    if self.config is None:
        return
    if option in self.config:
        del self.config[option]
def comparable(self):
    """str: comparable representation of the path specification."""
    parts = []
    if self.location is not None:
        parts.append('location: {0:s}'.format(self.location))
    if self.part_index is not None:
        parts.append('part index: {0:d}'.format(self.part_index))
    if self.start_offset is not None:
        parts.append('start offset: 0x{0:08x}'.format(self.start_offset))
    return self._GetComparable(sub_comparable_string=', '.join(parts))
def random_population(dna_size, pop_size, tune_params):
    """Create ``pop_size`` random DNA strings, each of length ``dna_size``."""
    return [
        [random_val(gene_index, tune_params) for gene_index in range(dna_size)]
        for _ in range(pop_size)
    ]
def _determine_impact_coordtransform(self, deltaAngleTrackImpact, nTrackChunksImpact, timpact, impact_angle):
    """Set up the transformation between (x,v) and (O,theta) around the impact.

    Computes the stream track near the impact time using an auxiliary orbit
    at the mean frequency of the stream and stores the results in the
    ``self._gap_*`` attributes.
    """
    # Integrate the progenitor backward to the time of impact
    self._gap_progenitor_setup()
    # Sign of delta angle tells us whether the impact happens to the
    # leading or trailing arm; self._sigMeanSign contains this info
    if impact_angle > 0.:
        self._gap_leading = True
    else:
        self._gap_leading = False
    if (self._gap_leading and not self._leading) or (not self._gap_leading and self._leading):
        raise ValueError('Modeling leading (trailing) impact for trailing (leading) arm; this is not allowed because it is nonsensical in this framework')
    self._gap_sigMeanSign = 1.
    if (self._gap_leading and self._progenitor_Omega_along_dOmega / self._sigMeanSign < 0.) or (not self._gap_leading and self._progenitor_Omega_along_dOmega / self._sigMeanSign > 0.):
        self._gap_sigMeanSign = -1.
    # Determine how much orbital time is necessary for the progenitor's orbit
    # at the time of impact to cover the part of the stream near the impact;
    # we cover the whole leading (or trailing) part of the stream
    if nTrackChunksImpact is None:
        # default is floor(self._deltaAngleTrackImpact/0.15)+1
        self._nTrackChunksImpact = int(numpy.floor(self._deltaAngleTrackImpact / 0.15)) + 1
    else:
        self._nTrackChunksImpact = nTrackChunksImpact
    if self._nTrackChunksImpact < 4:
        self._nTrackChunksImpact = 4
    dt = self._deltaAngleTrackImpact / self._progenitor_Omega_along_dOmega / self._sigMeanSign * self._gap_sigMeanSign
    self._gap_trackts = numpy.linspace(0., 2 * dt, 2 * self._nTrackChunksImpact - 1)  # to be sure that we cover it
    # Instantiate an auxiliaryTrack, which is an Orbit instance at the mean
    # frequency of the stream, and zero angle separation wrt the progenitor;
    # prog_stream_offset is the offset between this track and the progenitor
    # at zero angle (same as in streamdf, but just done at the time of impact
    # rather than the current time)
    prog_stream_offset = _determine_stream_track_single(
        self._aA, self._gap_progenitor, self._timpact,  # around the t of imp
        self._progenitor_angle - self._timpact * self._progenitor_Omega,
        self._gap_sigMeanSign, self._dsigomeanProgDirection,
        lambda da: super(streamgapdf, self).meanOmega(
            da, offset_sign=self._gap_sigMeanSign,
            tdisrupt=self._tdisrupt - self._timpact, use_physical=False),
        0.)  # angle = 0
    auxiliaryTrack = Orbit(prog_stream_offset[3])
    if dt < 0.:
        # Bug fix: the original passed the float 2.*self._nTrackChunksImpact-1
        # as numpy.linspace's ``num`` argument; ``num`` must be an integer.
        self._gap_trackts = numpy.linspace(0., -2. * dt, 2 * self._nTrackChunksImpact - 1)
        # Flip velocities before integrating
        auxiliaryTrack = auxiliaryTrack.flip()
    auxiliaryTrack.integrate(self._gap_trackts, self._pot)
    if dt < 0.:
        # Flip velocities again
        auxiliaryTrack._orb.orbit[:, 1] = -auxiliaryTrack._orb.orbit[:, 1]
        auxiliaryTrack._orb.orbit[:, 2] = -auxiliaryTrack._orb.orbit[:, 2]
        auxiliaryTrack._orb.orbit[:, 4] = -auxiliaryTrack._orb.orbit[:, 4]
    # Calculate the actions, frequencies, and angle for this auxiliary orbit
    acfs = self._aA.actionsFreqs(auxiliaryTrack(0.), maxn=3, use_physical=False)
    auxiliary_Omega = numpy.array([acfs[3], acfs[4], acfs[5]]).reshape(3)
    auxiliary_Omega_along_dOmega = numpy.dot(auxiliary_Omega, self._dsigomeanProgDirection)
    # Compute the transformation using _determine_stream_track_single
    allAcfsTrack = numpy.empty((self._nTrackChunksImpact, 9))
    alljacsTrack = numpy.empty((self._nTrackChunksImpact, 6, 6))
    allinvjacsTrack = numpy.empty((self._nTrackChunksImpact, 6, 6))
    thetasTrack = numpy.linspace(0., self._deltaAngleTrackImpact, self._nTrackChunksImpact)
    ObsTrack = numpy.empty((self._nTrackChunksImpact, 6))
    ObsTrackAA = numpy.empty((self._nTrackChunksImpact, 6))
    detdOdJps = numpy.empty((self._nTrackChunksImpact))
    if self._multi is None:
        for ii in range(self._nTrackChunksImpact):
            multiOut = _determine_stream_track_single(
                self._aA, auxiliaryTrack,
                # this factor accounts for the difference in frequency between
                # the progenitor and the auxiliary track; no timpact bc
                # gap_trackts is relative to timpact
                self._gap_trackts[ii] * numpy.fabs(self._progenitor_Omega_along_dOmega / auxiliary_Omega_along_dOmega),
                self._progenitor_angle - self._timpact * self._progenitor_Omega,
                self._gap_sigMeanSign, self._dsigomeanProgDirection,
                lambda da: super(streamgapdf, self).meanOmega(
                    da, offset_sign=self._gap_sigMeanSign,
                    tdisrupt=self._tdisrupt - self._timpact, use_physical=False),
                thetasTrack[ii])
            allAcfsTrack[ii, :] = multiOut[0]
            alljacsTrack[ii, :, :] = multiOut[1]
            allinvjacsTrack[ii, :, :] = multiOut[2]
            ObsTrack[ii, :] = multiOut[3]
            ObsTrackAA[ii, :] = multiOut[4]
            detdOdJps[ii] = multiOut[5]
    else:
        multiOut = multi.parallel_map(
            (lambda x: _determine_stream_track_single(
                self._aA, auxiliaryTrack,
                self._gap_trackts[x] * numpy.fabs(self._progenitor_Omega_along_dOmega / auxiliary_Omega_along_dOmega),
                self._progenitor_angle - self._timpact * self._progenitor_Omega,
                self._gap_sigMeanSign, self._dsigomeanProgDirection,
                lambda da: super(streamgapdf, self).meanOmega(
                    da, offset_sign=self._gap_sigMeanSign,
                    tdisrupt=self._tdisrupt - self._timpact, use_physical=False),
                thetasTrack[x])),
            range(self._nTrackChunksImpact),
            numcores=numpy.amin([self._nTrackChunksImpact, multiprocessing.cpu_count(), self._multi]))
        for ii in range(self._nTrackChunksImpact):
            allAcfsTrack[ii, :] = multiOut[ii][0]
            alljacsTrack[ii, :, :] = multiOut[ii][1]
            allinvjacsTrack[ii, :, :] = multiOut[ii][2]
            ObsTrack[ii, :] = multiOut[ii][3]
            ObsTrackAA[ii, :] = multiOut[ii][4]
            detdOdJps[ii] = multiOut[ii][5]
    # Repeat the track calculation using the previous track, to get closer to it
    for nn in range(self.nTrackIterations):
        if self._multi is None:
            for ii in range(self._nTrackChunksImpact):
                multiOut = _determine_stream_track_single(
                    self._aA, Orbit(ObsTrack[ii, :]), 0.,
                    self._progenitor_angle - self._timpact * self._progenitor_Omega,
                    self._gap_sigMeanSign, self._dsigomeanProgDirection,
                    lambda da: super(streamgapdf, self).meanOmega(
                        da, offset_sign=self._gap_sigMeanSign,
                        tdisrupt=self._tdisrupt - self._timpact, use_physical=False),
                    thetasTrack[ii])
                allAcfsTrack[ii, :] = multiOut[0]
                alljacsTrack[ii, :, :] = multiOut[1]
                allinvjacsTrack[ii, :, :] = multiOut[2]
                ObsTrack[ii, :] = multiOut[3]
                ObsTrackAA[ii, :] = multiOut[4]
                detdOdJps[ii] = multiOut[5]
        else:
            multiOut = multi.parallel_map(
                (lambda x: _determine_stream_track_single(
                    self._aA, Orbit(ObsTrack[x, :]), 0.,
                    self._progenitor_angle - self._timpact * self._progenitor_Omega,
                    self._gap_sigMeanSign, self._dsigomeanProgDirection,
                    lambda da: super(streamgapdf, self).meanOmega(
                        da, offset_sign=self._gap_sigMeanSign,
                        tdisrupt=self._tdisrupt - self._timpact, use_physical=False),
                    thetasTrack[x])),
                range(self._nTrackChunksImpact),
                numcores=numpy.amin([self._nTrackChunksImpact, multiprocessing.cpu_count(), self._multi]))
            for ii in range(self._nTrackChunksImpact):
                allAcfsTrack[ii, :] = multiOut[ii][0]
                alljacsTrack[ii, :, :] = multiOut[ii][1]
                allinvjacsTrack[ii, :, :] = multiOut[ii][2]
                ObsTrack[ii, :] = multiOut[ii][3]
                ObsTrackAA[ii, :] = multiOut[ii][4]
                detdOdJps[ii] = multiOut[ii][5]
    # Store the track
    self._gap_thetasTrack = thetasTrack
    self._gap_ObsTrack = ObsTrack
    self._gap_ObsTrackAA = ObsTrackAA
    self._gap_allAcfsTrack = allAcfsTrack
    self._gap_alljacsTrack = alljacsTrack
    self._gap_allinvjacsTrack = allinvjacsTrack
    self._gap_detdOdJps = detdOdJps
    self._gap_meandetdOdJp = numpy.mean(self._gap_detdOdJps)
    self._gap_logmeandetdOdJp = numpy.log(self._gap_meandetdOdJp)
    # Also calculate _ObsTrackXY in XYZ, vXYZ coordinates
    self._gap_ObsTrackXY = numpy.empty_like(self._gap_ObsTrack)
    TrackX = self._gap_ObsTrack[:, 0] * numpy.cos(self._gap_ObsTrack[:, 5])
    TrackY = self._gap_ObsTrack[:, 0] * numpy.sin(self._gap_ObsTrack[:, 5])
    TrackZ = self._gap_ObsTrack[:, 3]
    TrackvX, TrackvY, TrackvZ = bovy_coords.cyl_to_rect_vec(self._gap_ObsTrack[:, 1], self._gap_ObsTrack[:, 2], self._gap_ObsTrack[:, 4], self._gap_ObsTrack[:, 5])
    self._gap_ObsTrackXY[:, 0] = TrackX
    self._gap_ObsTrackXY[:, 1] = TrackY
    self._gap_ObsTrackXY[:, 2] = TrackZ
    self._gap_ObsTrackXY[:, 3] = TrackvX
    self._gap_ObsTrackXY[:, 4] = TrackvY
    self._gap_ObsTrackXY[:, 5] = TrackvZ
    return None
def lookup_int(values, name=None):
    """Lookup field whose matched token is converted to an integer.

    :param values: values allowed
    :param name: name for the field
    :return: grammar for the lookup field
    """
    grammar = basic.lookup(values, name)
    # Coerce the single matched token to int at parse time.
    grammar.addParseAction(lambda tokens: int(tokens[0]))
    return grammar
def configure_flair(self, subreddit, flair_enabled=False,
                    flair_position='right', flair_self_assign=False,
                    link_flair_enabled=False, link_flair_position='left',
                    link_flair_self_assign=False):
    """Configure the flair setting for the given subreddit.

    :returns: The json response from the server.
    """
    # The API expects 'on'/'off' strings rather than booleans, and an
    # empty link-flair position when link flair is disabled.
    data = {
        'r': six.text_type(subreddit),
        'flair_enabled': 'on' if flair_enabled else 'off',
        'flair_position': flair_position,
        'flair_self_assign_enabled': 'on' if flair_self_assign else 'off',
        'link_flair_position': (link_flair_position
                                if link_flair_enabled else ''),
        'link_flair_self_assign_enabled': ('on' if link_flair_self_assign
                                           else 'off'),
    }
    return self.request_json(self.config['flairconfig'], data=data)
def solve_sweep_structure(self, structures, sweep_param_list,
                          filename="structure_n_effs.dat", plot=True,
                          x_label="Structure number",
                          fraction_mode_list=None):
    """Find the modes of many structures.

    Args:
        structures (list): A list of `Structures` to find the modes
            of.
        sweep_param_list (list): A list of the parameter-sweep sweep
            that was used.  This is for plotting purposes only.
        filename (str): The nominal filename to use when saving the
            effective indices.  Defaults to 'structure_n_effs.dat'.
        plot (bool): `True` if plots should be generated,
            otherwise `False`.  Default is `True`.
        x_label (str): x-axis text to display in the plot.
        fraction_mode_list (list): A list of mode indices of the modes
            that should be included in the TE/TM mode fraction plot.
            If the list is empty or `None` (the default), all modes
            will be included.

    Returns:
        list: A list of the effective indices found for each structure.
    """
    # The default used to be a mutable `[]`, which is shared between
    # calls; `None` with an explicit fallback is behaviourally
    # identical and avoids the mutable-default pitfall.
    if fraction_mode_list is None:
        fraction_mode_list = []

    n_effs = []
    mode_types = []
    fractions_te = []
    fractions_tm = []
    for s in tqdm.tqdm(structures, ncols=70):
        self.solve(s)
        n_effs.append(np.real(self.n_effs))
        mode_types.append(self._get_mode_types())
        fractions_te.append(self.fraction_te)
        fractions_tm.append(self.fraction_tm)

    if filename:
        self._write_n_effs_to_file(n_effs,
                                   self._modes_directory + filename,
                                   sweep_param_list)

        with open(self._modes_directory + "mode_types.dat", "w") as fs:
            header = ",".join("Mode%i" % i
                              for i, _ in enumerate(mode_types[0]))
            fs.write("# " + header + "\n")
            for mt in mode_types:
                fs.write(",".join("%s %.2f" % pair for pair in mt) + "\n")

        # The TE and TM fraction files share the same layout; write
        # both with one loop instead of duplicated blocks.
        for pol, fractions in (("te", fractions_te), ("tm", fractions_tm)):
            path = self._modes_directory + "fraction_%s.dat" % pol
            with open(path, "w") as fs:
                fs.write("# param sweep," + "fraction %s" % pol + "\n")
                for param, frac in zip(sweep_param_list, fractions):
                    txt = "%.6f," % param
                    txt += ",".join("%.2f" % f for f in frac)
                    fs.write(txt + "\n")

    if plot:
        # MPL toggles whether LaTeX-style labels can be rendered.
        if MPL:
            title = "$n_{eff}$ vs %s" % x_label
            y_label = "$n_{eff}$"
        else:
            title = "n_{effs} vs %s" % x_label
            y_label = "n_{eff}"
        self._plot_n_effs(self._modes_directory + filename,
                          self._modes_directory + "fraction_te.dat",
                          x_label, y_label, title)

        title = "TE Fraction vs %s" % x_label
        self._plot_fraction(self._modes_directory + "fraction_te.dat",
                            x_label, "TE Fraction [%]", title,
                            fraction_mode_list)
        title = "TM Fraction vs %s" % x_label
        self._plot_fraction(self._modes_directory + "fraction_tm.dat",
                            x_label, "TM Fraction [%]", title,
                            fraction_mode_list)

    return n_effs
def histogram(data, **kwargs):
    """Function to create histogram, e.g. for voltages or currents.

    Parameters
    ----------
    data : :pandas:`pandas.DataFrame<dataframe>`
        Data to be plotted, e.g. voltage or current (`v_res` or `i_res` from
        :class:`edisgo.grid.network.Results`). Index of the dataframe must be
        a :pandas:`pandas.DatetimeIndex<datetimeindex>`.
    timeindex : :pandas:`pandas.Timestamp<timestamp>` or None, optional
        Specifies time step histogram is plotted for. If timeindex is None all
        time steps provided in dataframe are used. Default: None.
    directory : :obj:`str` or None, optional
        Path to directory the plot is saved to. Is created if it does not
        exist. Default: None.
    filename : :obj:`str` or None, optional
        Filename the plot is saved as. File format is specified by ending. If
        filename is None, the plot is shown. Default: None.
    color : :obj:`str` or None, optional
        Color used in plot. If None it defaults to blue. Default: None.
    alpha : :obj:`float`, optional
        Transparency of the plot. Must be a number between 0 and 1,
        where 0 is see through and 1 is opaque. Default: 1.
    title : :obj:`str` or None, optional
        Plot title. Default: None.
    x_label : :obj:`str`, optional
        Label for x-axis. Default: "".
    y_label : :obj:`str`, optional
        Label for y-axis. Default: "".
    normed : :obj:`bool`, optional
        Defines if histogram is normed. Default: False.
    x_limits : :obj:`tuple` or None, optional
        Tuple with x-axis limits. First entry is the minimum and second entry
        the maximum value. Default: None.
    y_limits : :obj:`tuple` or None, optional
        Tuple with y-axis limits. First entry is the minimum and second entry
        the maximum value. Default: None.
    fig_size : :obj:`str` or :obj:`tuple`, optional
        Size of the figure in inches or a string with the following options:

        * 'a4portrait'
        * 'a4landscape'
        * 'a5portrait'
        * 'a5landscape'

        Default: 'a5landscape'.
    binwidth : :obj:`float`
        Width of bins. Default: None.

    """
    timeindex = kwargs.get('timeindex', None)
    directory = kwargs.get('directory', None)
    filename = kwargs.get('filename', None)
    title = kwargs.get('title', "")
    x_label = kwargs.get('x_label', "")
    y_label = kwargs.get('y_label', "")
    color = kwargs.get('color', None)
    alpha = kwargs.get('alpha', 1)
    normed = kwargs.get('normed', False)
    x_limits = kwargs.get('x_limits', None)
    y_limits = kwargs.get('y_limits', None)
    binwidth = kwargs.get('binwidth', None)
    fig_size = kwargs.get('fig_size', 'a5landscape')

    standard_sizes = {'a4portrait': (8.27, 11.69),
                      'a4landscape': (11.69, 8.27),
                      'a5portrait': (5.8, 8.3),
                      'a5landscape': (8.3, 5.8)}
    # Fix: a tuple of inches is documented as valid but was previously
    # swallowed by a bare `except:` and replaced with 'a5landscape'.
    # Pass tuples through unchanged; map known names; fall back to
    # 'a5landscape' for unknown names (the original fallback).
    if not isinstance(fig_size, tuple):
        fig_size = standard_sizes.get(fig_size, standard_sizes['a5landscape'])

    if timeindex is not None:
        plot_data = data.loc[timeindex, :]
    else:
        # Use all time steps: flatten the frame into a single series.
        plot_data = data.T.stack()

    if binwidth is not None:
        # Centre the bins on the limits by extending half a bin each way.
        if x_limits is not None:
            lowerlimit = x_limits[0] - binwidth / 2
            upperlimit = x_limits[1] + binwidth / 2
        else:
            lowerlimit = plot_data.min() - binwidth / 2
            upperlimit = plot_data.max() + binwidth / 2
        bins = np.arange(lowerlimit, upperlimit, binwidth)
    else:
        bins = 10

    plt.figure(figsize=fig_size)
    ax = plot_data.hist(normed=normed, color=color, alpha=alpha,
                        bins=bins, grid=True)
    plt.minorticks_on()
    if x_limits is not None:
        ax.set_xlim(x_limits[0], x_limits[1])
    if y_limits is not None:
        ax.set_ylim(y_limits[0], y_limits[1])
    if title is not None:
        plt.title(title)
    plt.xlabel(x_label)
    plt.ylabel(y_label)

    if filename is None:
        plt.show()
    else:
        if directory is not None:
            os.makedirs(directory, exist_ok=True)
            filename = os.path.join(directory, filename)
        plt.savefig(filename)
        plt.close()
def set_sorting(self, flag):
    """Enable result sorting after search is complete."""
    clickable = flag == ON
    self.sorting['status'] = flag
    # Header sections are only clickable while sorting is enabled.
    self.header().setSectionsClickable(clickable)
def get_pressure(self):
    """Return the pressure in millibars, or 0 if the reading is invalid."""
    # Ensure the pressure sensor is initialised before reading.
    self._init_pressure()
    reading = self._pressure.pressureRead()
    # reading[0] flags whether the value in reading[1] is valid.
    return reading[1] if reading[0] else 0
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.