signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def results(data):
    """Return the ``data`` attribute of every record in *data*, as a list."""
    return [record.data for record in data]
def setup_lilypond_windows(path="default"):
    """Optional helper which does the environment setup for lilypond in Windows.

    If you've run this method, you do not need (and should not) provide a
    lyscript when you instantiate this class. As this method is static, you
    can run it before you set up the LilypondRenderer instance.

    :param path: the path to the folder which contains the file
        ``lilypond.exe`` — usually ProgramFiles/LilyPond/usr/bin. Leave at
        ``"default"`` to use that standard location.
    :returns: None
    """
    default = "C:/Program Files (x86)/LilyPond/usr/bin"
    # Append the lilypond binary folder to the PATH environment variable.
    entries = os.environ['PATH'].split(";")
    entries.append(default if path == "default" else path)
    os.environ['PATH'] = ";".join(entries)
def send_dir(self, local_path, remote_path, user='root'):
    """Upload a directory to the remote host.

    :param local_path: path of the directory on the local machine.
    :param remote_path: destination path on the remote host.
    :param user: remote account used for the transfer (default ``'root'``).
    :returns: the result of ``self.ssh_pool.send_dir`` — presumably the
        transfer outcome; confirm against the pool implementation.
    """
    # Make sure the target account is enabled before transferring.
    self.enable_user(user)
    return self.ssh_pool.send_dir(user, local_path, remote_path)
def get_last_traded_dt(self, asset, dt):
    """Get the latest day on or before ``dt`` in which ``asset`` traded.

    If there are no trades on or before ``dt``, returns ``pd.NaT``.

    Parameters
    ----------
    asset : zipline.asset.Asset
        The asset for which to get the last traded day.
    dt : pd.Timestamp
        The dt at which to start searching for the last traded day.

    Returns
    -------
    last_traded : pd.Timestamp
        The day of the last trade for the given asset, using the
        input dt as a vantage point.
    """
    # Dispatch to the per-country reader holding this asset's data.
    country_code = self._country_code_for_assets([asset.sid])
    return self._readers[country_code].get_last_traded_dt(asset, dt)
def create_badge(self, update=False):
    """Save the badge in the database (or update it if ``update`` is ``True``).

    Returns a tuple ``(badge, created)``: ``badge`` is the badge object and
    ``created`` is ``True`` if the badge has just been created.
    """
    badge, created = self.badge, False
    if badge:
        logger.debug('✓ Badge %s: already created', badge.slug)
        if update:
            # Only collect fields whose value actually changed so the
            # UPDATE statement stays minimal.
            to_update = {}
            for field in ('name', 'slug', 'description', 'image'):
                attr = getattr(self, field)
                badge_attr = getattr(badge, field)
                if attr != badge_attr:
                    to_update[field] = attr
                    logger.debug('✓ Badge %s: updated "%s" field', self.slug, field)
            Badge.objects.filter(id=badge.id).update(**to_update)
    else:
        # Mandatory fields always go in; optional fields only when set so
        # model-level defaults can apply.
        kwargs = {'name': self.name, 'image': self.image}
        optional_fields = ['slug', 'description']
        for field in optional_fields:
            value = getattr(self, field)
            if value is not None:
                kwargs[field] = value
        badge = Badge.objects.create(**kwargs)
        created = True
        logger.debug('✓ Badge %s: created', badge.slug)
    return (badge, created)
def set_data_length(self, length):
    # type: (int) -> None
    '''Set the length of data for this El Torito Entry.

    Parameters:
     length - The new length for the El Torito Entry.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('El Torito Entry not initialized')
    # El Torito stores lengths as a count of 512-byte virtual sectors,
    # rounded up.
    self.sector_count = utils.ceiling_div(length, 512)
def transform(self, ref, strict=False):
    """Transform a URI reference relative to `self` into a
    :class:`SplitResult` representing its target URI.

    Implements the reference-resolution algorithm of RFC 3986,
    section 5.2.2.
    """
    scheme, authority, path, query, fragment = self.RE.match(ref).groups()
    # RFC 3986 5.2.2. Transform References
    if scheme is not None and (strict or scheme != self.scheme):
        # Absolute reference: keep its own components.
        path = self.__remove_dot_segments(path)
    elif authority is not None:
        # Network-path reference: inherit only the scheme.
        scheme = self.scheme
        path = self.__remove_dot_segments(path)
    elif not path:
        # Same-document reference: inherit path and, if absent, the query.
        scheme = self.scheme
        authority = self.authority
        path = self.path
        query = self.query if query is None else query
    elif path.startswith(self.SLASH):
        # Absolute-path reference.
        scheme = self.scheme
        authority = self.authority
        path = self.__remove_dot_segments(path)
    else:
        # Relative-path reference: merge with the base path.
        scheme = self.scheme
        authority = self.authority
        path = self.__remove_dot_segments(self.__merge(path))
    return type(self)(scheme, authority, path, query, fragment)
def filter_unique_peptides(peptides, score, ns):
    """Filter unique peptides from multiple Percolator output XML files.

    Takes an iterable of peptide elements, a score key to filter on and an
    XML namespace map. Yields stringified peptide elements, keeping for each
    peptide sequence only the best-scoring occurrence.
    """
    # Map short score keys to the element names used in the XML.
    scores = {'q': 'q_value', 'pep': 'pep', 'p': 'p_value', 'svm': 'svm_score'}
    highest = {}
    for el in peptides:
        featscore = float(el.xpath('xmlns:%s' % scores[score], namespaces=ns)[0].text)
        seq = reader.get_peptide_seq(el, ns)
        if seq not in highest:
            highest[seq] = {'pep_el': formatting.stringify_strip_namespace_declaration(el, ns), 'score': featscore}
        if score == 'svm':
            # greater than score is accepted
            if featscore > highest[seq]['score']:
                highest[seq] = {'pep_el': formatting.stringify_strip_namespace_declaration(el, ns), 'score': featscore}
        else:
            # lower than score is accepted
            if featscore < highest[seq]['score']:
                highest[seq] = {'pep_el': formatting.stringify_strip_namespace_declaration(el, ns), 'score': featscore}
        # Release the element to keep memory bounded on large inputs.
        formatting.clear_el(el)
    for pep in list(highest.values()):
        yield pep['pep_el']
def _makeTags(tagStr, xml):
    """Internal helper to construct opening and closing tag expressions,
    given a tag name (a string or an existing pyparsing expression)."""
    if isinstance(tagStr, basestring):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas, alphanums + "_-:")
    if (xml):
        # XML mode: attribute values must be double-quoted.
        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
        openTag = Suppress("<") + tagStr("tag") + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">")
    else:
        # HTML mode: unquoted/valueless attributes allowed; attribute names
        # are lowercased.
        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
        tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printablesLessRAbrack)
        openTag = Suppress("<") + tagStr("tag") + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens) + Optional(Suppress("=") + tagAttrValue)))) + Optional("/", default=[False]).setResultsName("empty").setParseAction(lambda s, l, t: t[0] == '/') + Suppress(">")
    closeTag = Combine(_L("</") + tagStr + ">")
    # Expose results under startXxx / endXxx names derived from the tag name.
    openTag = openTag.setResultsName("start" + "".join(resname.replace(":", " ").title().split())).setName("<%s>" % tagStr)
    closeTag = closeTag.setResultsName("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % tagStr)
    openTag.tag = resname
    closeTag.tag = resname
    return openTag, closeTag
def elem_add(self, idx=None, name=None, **kwargs):
    """Add an element of this model.

    :param idx: element idx
    :param name: element name
    :param kwargs: keyword arguments of the parameters
    :return: allocated idx
    """
    # Register with the device manager (which may allocate idx) and the group.
    idx = self.system.devman.register_element(dev_name=self._name, idx=idx)
    self.system.__dict__[self._group].register_element(self._name, idx)
    self.uid[idx] = self.n
    self.idx.append(idx)
    self.mdl_to.append(list())
    self.mdl_from.append(list())
    # self.n += 1
    if name is None:
        # Auto-generate a name such as "<ModelName> <n>".
        self.name.append(self._name + ' ' + str(self.n))
    else:
        self.name.append(name)
    # check mandatory parameters
    for key in self._mandatory:
        if key not in kwargs.keys():
            self.log('Mandatory parameter <{:s}.{:s}> missing'.format(self.name[-1], key), ERROR)
            sys.exit(1)
    # set default values
    for key, value in self._data.items():
        self.__dict__[key].append(value)
    # overwrite custom values
    for key, value in kwargs.items():
        if key not in self._data:
            self.log('Parameter <{:s}.{:s}> is not used.'.format(self.name[-1], key), WARNING)
            continue
        self.__dict__[key][-1] = value
        # check data consistency: a falsy value is invalid for keys listed
        # in self._zeros, so fall back to a sensible default.
        if not value and key in self._zeros:
            if key == 'Sn':
                default = self.system.mva
            elif key == 'fn':
                default = self.system.config.freq
            else:
                default = self._data[key]
            self.__dict__[key][-1] = default
            self.log('Using default value for <{:s}.{:s}>'.format(self.name[-1], key), WARNING)
    return idx
def kak_decomposition(mat: np.ndarray, rtol: float = 1e-5, atol: float = 1e-8) -> KakDecomposition:
    """Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions.

    Args:
        mat: The 4x4 unitary matrix to decompose.
        rtol: Per-matrix-entry relative tolerance on equality.
        atol: Per-matrix-entry absolute tolerance on equality.

    Returns:
        A `cirq.KakDecomposition` canonicalized such that the interaction
        coefficients x, y, z satisfy:
            0 ≤ abs(z) ≤ y ≤ x ≤ π/4
            z ≠ -π/4

    Raises:
        ValueError: Bad matrix.
        ArithmeticError: Failed to perform the decomposition.

    References:
        'An Introduction to Cartan's KAK Decomposition for QC Programmers'
        https://arxiv.org/abs/quant-ph/0507171
    """
    # The "magic basis" change of basis; conjugating by it maps the local
    # (single-qubit) group into SO(4).
    magic = np.array([[1, 0, 0, 1j], [0, 1j, 1, 0], [0, 1j, -1, 0], [1, 0, 0, -1j]]) * np.sqrt(0.5)
    gamma = np.array([[1, 1, 1, 1], [1, 1, -1, -1], [-1, 1, -1, 1], [1, -1, -1, 1]]) * 0.25
    # Diagonalize in magic basis.
    left, d, right = diagonalize.bidiagonalize_unitary_with_special_orthogonals(combinators.dot(np.conj(magic.T), mat, magic), atol=atol, rtol=rtol, check_preconditions=False)
    # Recover pieces.
    a1, a0 = so4_to_magic_su2s(left.T, atol=atol, rtol=rtol, check_preconditions=False)
    b1, b0 = so4_to_magic_su2s(right.T, atol=atol, rtol=rtol, check_preconditions=False)
    # Extract the global phase w and interaction angles x, y, z from the
    # phases of the diagonal.
    w, x, y, z = gamma.dot(np.vstack(np.angle(d))).flatten()
    g = np.exp(1j * w)
    # Canonicalize.
    inner_cannon = kak_canonicalize_vector(x, y, z)
    b1 = np.dot(inner_cannon.single_qubit_operations_before[0], b1)
    b0 = np.dot(inner_cannon.single_qubit_operations_before[1], b0)
    a1 = np.dot(a1, inner_cannon.single_qubit_operations_after[0])
    a0 = np.dot(a0, inner_cannon.single_qubit_operations_after[1])
    return KakDecomposition(interaction_coefficients=inner_cannon.interaction_coefficients, global_phase=g * inner_cannon.global_phase, single_qubit_operations_before=(b1, b0), single_qubit_operations_after=(a1, a0))
def load_reconstruction(folder, slice_start=0, slice_end=-1):
    """Load a volume from folder, also returns the corresponding partition.

    Parameters
    ----------
    folder : str
        Path to the folder where the DICOM files are stored.
    slice_start : int
        Index of the first slice to use. Used for subsampling.
    slice_end : int
        Index of the final slice to use.
        NOTE(review): with the default of -1 Python slicing drops the last
        file — confirm this is intended.

    Returns
    -------
    partition : `odl.RectPartition`
        Partition describing the geometric positioning of the voxels.
    data : `numpy.ndarray`
        Volumetric data. Scaled such that data = 1 for water (0 HU).

    Notes
    -----
    DICOM data is highly non trivial. Typically, each slice has been computed
    with a slice thickness (e.g. 3mm) but the slice spacing might be different
    from that. Further, the coordinates in DICOM is typically the *middle* of
    the pixel, not the corners as in ODL. This function should handle all of
    these peculiarities and give a volume with the correct coordinate system
    attached.
    """
    file_names = sorted([f for f in os.listdir(folder) if f.endswith(".IMA")])
    if len(file_names) == 0:
        raise ValueError('No DICOM files found in {}'.format(folder))
    volumes = []
    datasets = []
    file_names = file_names[slice_start:slice_end]
    for file_name in tqdm.tqdm(file_names, 'loading volume data'):
        # read the file
        dataset = dicom.read_file(folder + '/' + file_name)
        # Get parameters
        pixel_size = np.array(dataset.PixelSpacing)
        pixel_thickness = float(dataset.SliceThickness)
        rows = dataset.Rows
        cols = dataset.Columns
        # Get data array and convert to correct coordinates
        data_array = np.array(np.frombuffer(dataset.PixelData, 'H'), dtype='float32')
        data_array = data_array.reshape([cols, rows], order='C')
        data_array = np.rot90(data_array, -1)
        # Convert from storage type to densities
        # TODO: Optimize these computations
        hu_values = (dataset.RescaleSlope * data_array + dataset.RescaleIntercept)
        densities = (hu_values + 1000) / 1000
        # Store results
        volumes.append(densities)
        datasets.append(dataset)
    # Parameters from the last slice are used for the whole volume.
    voxel_size = np.array(list(pixel_size) + [pixel_thickness])
    shape = np.array([rows, cols, len(volumes)])
    # Compute geometry parameters
    mid_pt = (np.array(dataset.ReconstructionTargetCenterPatient) - np.array(dataset.DataCollectionCenterPatient))
    reconstruction_size = (voxel_size * shape)
    min_pt = mid_pt - reconstruction_size / 2
    max_pt = mid_pt + reconstruction_size / 2
    # axis 1 has reversed convention
    min_pt[1], max_pt[1] = -max_pt[1], -min_pt[1]
    if len(datasets) > 1:
        slice_distance = np.abs(float(datasets[1].DataCollectionCenterPatient[2]) - float(datasets[0].DataCollectionCenterPatient[2]))
    else:
        # If we only have one slice, we must approximate the distance.
        slice_distance = pixel_thickness
    # The middle of the minimum/maximum slice can be computed from the
    # DICOM attribute "DataCollectionCenterPatient". Since ODL uses corner
    # points (e.g. edge of volume) we need to add half a voxel thickness to
    # both sides.
    min_pt[2] = -np.array(datasets[0].DataCollectionCenterPatient)[2]
    min_pt[2] -= 0.5 * slice_distance
    max_pt[2] = -np.array(datasets[-1].DataCollectionCenterPatient)[2]
    max_pt[2] += 0.5 * slice_distance
    partition = odl.uniform_partition(min_pt, max_pt, shape)
    volume = np.transpose(np.array(volumes), (1, 2, 0))
    return partition, volume
def range_pad(lower, upper, padding=None, log=False):
    """Pads the range by a fraction of the interval.

    :param lower: lower bound (number or datetime-like).
    :param upper: upper bound.
    :param padding: fraction of the span to pad with; a scalar pads both ends
        equally, a 2-tuple gives (lower, upper) fractions.
    :param log: pad in log10 space (only used for positive numeric bounds).
    :returns: tuple ``(start, end)`` of the padded range.
    """
    if padding is not None and not isinstance(padding, tuple):
        padding = (padding, padding)
    if is_number(lower) and is_number(upper) and padding is not None:
        if not isinstance(lower, datetime_types) and log and lower > 0 and upper > 0:
            # Pad symmetrically around the midpoint in log10 space.
            log_min = np.log(lower) / np.log(10)
            log_max = np.log(upper) / np.log(10)
            lspan = (log_max - log_min) * (1 + padding[0] * 2)
            uspan = (log_max - log_min) * (1 + padding[1] * 2)
            center = (log_min + log_max) / 2.0
            start, end = np.power(10, center - lspan / 2.), np.power(10, center + uspan / 2.)
        else:
            if isinstance(lower, datetime_types) and not isinstance(lower, cftime_types):
                # Ensure timedelta can be safely divided
                lower, upper = np.datetime64(lower), np.datetime64(upper)
                span = (upper - lower).astype('>m8[ns]')
            else:
                span = (upper - lower)
            lpad = span * (padding[0])
            upad = span * (padding[1])
            start, end = lower - lpad, upper + upad
    else:
        # Non-numeric bounds or no padding requested: return unchanged.
        start, end = lower, upper
    return start, end
def _ParseRedirected(self, parser_mediator, msiecf_item, recovered=False):
    """Extract data from a MSIE Cache Files (MSIECF) redirected item.

    Every item is stored as an event object, one for each timestamp.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      msiecf_item (pymsiecf.redirected): MSIECF redirected item.
      recovered (Optional[bool]): True if the item was recovered.
    """
    # Redirected items carry no timestamp of their own, so emit a
    # "not a time" semantic placeholder.
    date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    event_data = MSIECFRedirectedEventData()
    event_data.offset = msiecf_item.offset
    event_data.recovered = recovered
    event_data.url = msiecf_item.location
    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)
def find_classes(self, name=".*", no_external=False):
    """Find classes by name, using a regular expression.

    Yields every :class:`ClassAnalysis` object whose class name matches
    *name*.

    :param name: regular expression for the class name (default ``".*"``)
    :param no_external: remove external classes from the output
        (default ``False``)
    :rtype: generator of `ClassAnalysis`
    """
    pattern = re.compile(name)
    for class_name, analysis in self.classes.items():
        if not pattern.match(class_name):
            continue
        if no_external and isinstance(analysis.get_vm_class(), ExternalClass):
            continue
        yield analysis
def size_bundle_from_tubecount(N, Do, pitch, Ntp=1, angle=30, Method=None, AvailableMethods=False):
    r'''Calculates the outer diameter of a tube bundle containing a specified
    number of tubes.

    The tube count is affected by the pitch, number of tube passes, and angle.
    The result is an exact number of tubes and is calculated by a very
    accurate method using number theory by default. This method is available
    only up to 100,000 tubes.

    Parameters
    ----------
    N : int
        Total number of tubes that fit in the heat exchanger, [-]
    Do : float
        Tube outer diameter, [m]
    pitch : float
        Pitch; distance between two orthogonal tube centers, [m]
    Ntp : int, optional
        Number of tube passes, [-]
    angle : float, optional
        The angle the tubes are positioned; 30, 45, 60 or 90, [degrees]

    Returns
    -------
    DBundle : float
        Outer diameter of tube bundle, [m]
    methods : list, only returned if AvailableMethods == True
        List of methods which can be used to calculate the tube count

    Other Parameters
    ----------------
    Method : string, optional
        One of 'Phadkeb', 'HEDH', 'VDI' or 'Perry'
    AvailableMethods : bool, optional
        If True, function will consider which methods can be used to
        calculate the tube count with the given inputs

    See Also
    --------
    Ntubes
    DBundle_for_Ntubes_Phadkeb
    D_for_Ntubes_VDI
    DBundle_for_Ntubes_HEDH

    Notes
    -----
    The 'Perry' method is solved with a numerical solver and is very
    unreliable.

    Examples
    --------
    >>> size_bundle_from_tubecount(N=1285, Do=0.025, pitch=0.03125)
    1.1985676402390355
    '''
    def list_methods():
        # Which correlations apply depends on the number of tube passes.
        methods = ['Phadkeb']
        if Ntp == 1:
            methods.append('HEDH')
        if Ntp in [1, 2, 4, 8]:
            methods.append('VDI')
        if Ntp in [1, 2, 4, 6]:
            # Also restricted to 1.25 pitch ratio but not hard coded
            methods.append('Perry')
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = 'Phadkeb'
    if Method == 'Phadkeb':
        return DBundle_for_Ntubes_Phadkeb(Ntubes=N, Ntp=Ntp, Do=Do, pitch=pitch, angle=angle)
    elif Method == 'VDI':
        return D_for_Ntubes_VDI(N=N, Ntp=Ntp, Do=Do, pitch=pitch, angle=angle)
    elif Method == 'HEDH':
        return DBundle_for_Ntubes_HEDH(N=N, Do=Do, pitch=pitch, angle=angle)
    elif Method == 'Perry':
        # Solved numerically by root finding on the Perry tube-count
        # correlation; see Notes about reliability.
        to_solve = lambda D: Ntubes_Perrys(DBundle=D, Do=Do, Ntp=Ntp, angle=angle) - N
        return ridder(to_solve, Do * 5, 1000 * Do)
    else:
        raise Exception('Method not recognized; allowable methods are ' '"Phadkeb", "HEDH", "VDI", and "Perry"')
def _setup_events(plugin):
    """Handles setup or teardown of event hook registration for the provided
    plugin.

    `plugin`
        ``Plugin`` class.
    """
    events = plugin.events
    if events and isinstance(events, (list, tuple)):
        # Only register event names the framework actually knows about.
        for event in [e for e in events if e in _EVENT_VALS]:
            register('event', event, plugin)
def track_pa11y_stats(pa11y_results, spider):
    """Keep track of the number of pa11y errors, warnings, and notices that
    we've seen so far, using the Scrapy stats collector:
    http://doc.scrapy.org/en/1.1/topics/stats.html
    """
    num_err, num_warn, num_notice = pa11y_counts(pa11y_results)
    stats = spider.crawler.stats
    # Accumulate per-category counters across the whole crawl.
    stats.inc_value("pa11y/error", count=num_err, spider=spider)
    stats.inc_value("pa11y/warning", count=num_warn, spider=spider)
    stats.inc_value("pa11y/notice", count=num_notice, spider=spider)
def lock(self, lock_name, timeout=900):
    """Attempt to use lock and unlock, which will work if the Cache is Redis,
    but fall back to a memcached-compliant add/delete approach.

    If the Jobtastic Cache isn't Redis or Memcache, or another product with a
    compatible lock or add/delete API, then a custom locking function will be
    required. However, Redis and Memcache are expected to account for the
    vast majority of installations.

    NOTE(review): this is a generator (it ``yield``s while holding the lock)
    and is presumably wrapped with ``contextlib.contextmanager`` — confirm at
    the decorator site.

    See:
    - http://loose-bits.com/2010/10/distributed-task-locking-in-celery.html
    - http://celery.readthedocs.org/en/latest/tutorials/task-cookbook.html#ensuring-a-task-is-only-executed-one-at-a-time  # NOQA
    """
    # Try Redis first
    try:
        try:
            lock = self.cache.lock
        except AttributeError:
            try:
                # Possibly using old Django-Redis
                lock = self.cache.client.lock
            except AttributeError:
                # Possibly using Werkzeug + Redis
                lock = self.cache._client.lock
        have_lock = False
        lock = lock(lock_name, timeout=timeout)
        try:
            have_lock = lock.acquire(blocking=True)
            if have_lock:
                yield
        finally:
            if have_lock:
                lock.release()
    except AttributeError:
        # No lock method on the cache, so fall back to add
        have_lock = False
        try:
            # Busy-wait until the add succeeds, i.e. no other holder.
            while not have_lock:
                have_lock = self.cache.add(lock_name, 'locked', timeout)
            if have_lock:
                yield
        finally:
            if have_lock:
                self.cache.delete(lock_name)
def _get(self, obj):
    '''Internal implementation of instance attribute access for the
    ``BasicPropertyDescriptor`` getter.

    If the value has been explicitly set by a user, return that value.
    Otherwise, return the default.

    Args:
        obj (HasProps): the instance to get a value of this property for

    Returns:
        object

    Raises:
        RuntimeError
            If the |HasProps| instance has not yet been initialized, or if
            this descriptor is on a class that is not a |HasProps|.
    '''
    if not hasattr(obj, '_property_values'):
        # Not a (fully initialized) HasProps: fail loudly rather than
        # returning a bogus value.
        raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" % (self.name, obj.__class__.__name__))
    if self.name not in obj._property_values:
        # Value was never explicitly set: fall back to the default.
        return self._get_default(obj)
    else:
        return obj._property_values[self.name]
def _increment_index(self, di=1):
    """Move the most recently displayed annotation to the next item in the
    series, if possible. If ``di`` is -1, move it to the previous item.
    """
    if self._last_event is None:
        # Nothing has been displayed yet.
        return
    if not hasattr(self._last_event, 'ind'):
        # The picked artist is not an indexed series; nothing to step.
        return
    event = self._last_event
    xy = pick_info.get_xy(event.artist)
    if xy is not None:
        x, y = xy
        # Wrap around at either end of the series.
        i = (event.ind[0] + di) % len(x)
        event.ind = [i]
        event.mouseevent.xdata = x[i]
        event.mouseevent.ydata = y[i]
        self.update(event, self._last_annotation)
def subtract(dict_a, dict_b, strict=False):
    """A stricter form of subtract_by_key(); this version will only remove an
    entry from dict_a if the key is in dict_b *and* the value at that key
    matches."""
    if not strict:
        return subtract_by_key(dict_a, dict_b)
    # Keep entries that are either absent from dict_b or differ in value.
    return {
        key: value
        for key, value in dict_a.items()
        if key not in dict_b or dict_b[key] != value
    }
def createValidationDataSampler(dataset, ratio):
    """Create ``torch.utils.data.Sampler``s used to split the dataset into 2
    randomly sampled subsets. The first should be used for training and the
    second for validation.

    :param dataset: A valid torch.utils.data.Dataset
        (i.e. torchvision.datasets.MNIST)
    :param ratio: The percentage of the dataset to be used for training. The
        remaining (1 - ratio)% will be used for validation
    :return: tuple with 2 torch.utils.data.Sampler: (train, validate)
    """
    shuffled = np.random.permutation(len(dataset))
    split = int(len(shuffled) * ratio)
    train_sampler = torch.utils.data.SubsetRandomSampler(indices=shuffled[:split])
    validation_sampler = torch.utils.data.SubsetRandomSampler(indices=shuffled[split:])
    return (train_sampler, validation_sampler)
def inspect(lines):
    """Inspect an SDFile given as a list of strings.

    Returns:
        tuple: (data label list, number of records)
    """
    # Space should be accepted inside label names.
    label_exp = re.compile(r">.*?<([\w ]+)>")
    labels = set()
    record_count = 0
    in_record = False
    for line in lines:
        if line.startswith("M END\n"):
            in_record = True
        elif line.startswith("$$$$"):
            record_count += 1
            in_record = False
        else:
            match = label_exp.match(line)
            if match:
                labels.add(match.group(1))
    if in_record:
        # The last record was not terminated by "$$$$"; count it anyway.
        record_count += 1
    return list(labels), record_count
def standardize(Y, in_place=False):
    """Standardize Y (zero mean, unit variance per column) in a way that is
    robust to missing values.

    in_place: create a copy or carry out inplace operations?
    """
    out = Y if in_place else Y.copy()
    for col in range(out.shape[1]):
        # Statistics are computed over non-NaN rows only, but applied to
        # the whole column.
        ok = ~SP.isnan(out[:, col])
        out[:, col] -= out[ok, col].mean()
        out[:, col] /= out[ok, col].std()
    return out
def _get_maxentscan(data):
    """Return VEP plugin arguments for MaxEntScan, if it is installed.

    The plugin executes the logic from one of the scripts depending on which
    splice region the variant overlaps:
      score5.pl: last 3 bases of exon --> first 6 bases of intron
      score3.pl: last 20 bases of intron --> first 3 bases of exon
    The plugin reports the reference, alternate and difference (REF - ALT)
    maximum entropy scores.
    https://github.com/Ensembl/VEP_plugins/blob/master/MaxEntScan.pm
    """
    # Locate the directory that holds the MaxEntScan perl scripts via the
    # configured program path.
    maxentscan_dir = os.path.dirname(os.path.realpath(config_utils.get_program("maxentscan_score3.pl", data["config"])))
    if maxentscan_dir and os.path.exists(maxentscan_dir):
        return ["--plugin", "MaxEntScan,%s" % (maxentscan_dir)]
    else:
        # Not installed: contribute no extra VEP arguments.
        return []
def revoke_user(self, user_id):
    """Revoke a user from the tenant.

    This will remove pending or approved roles but will not delete the user
    from Keystone.

    :param user_id: identifier of the user to revoke.
    :returns: a ``rest_client.ResponseBody``, or ``None`` when the response
        could not be processed (see note below).
    """
    uri = 'openstack/users/%s' % user_id
    try:
        resp = self.delete(uri)
    except AttributeError:
        # note: this breaks. stacktask returns a string, not json.
        return
    self.expected_success(200, resp.status)
    return rest_client.ResponseBody(resp, None)
def copy_analysis_files(cls, orig_dir, dest_dir, copyfiles):
    """Copy a list of files from orig_dir to dest_dir.

    :param orig_dir: source directory.
    :param dest_dir: destination directory (must already exist).
    :param copyfiles: iterable of glob patterns, relative to ``orig_dir``.
    """
    for pattern in copyfiles:
        glob_path = os.path.join(orig_dir, pattern)
        for orig_path in glob.glob(glob_path):
            # Use the globbed path directly: rebuilding it from the basename
            # (as the previous version did) pointed at the wrong file for
            # patterns containing sub-directories.
            dest_path = os.path.join(dest_dir, os.path.basename(orig_path))
            try:
                copyfile(orig_path, dest_path)
            except IOError:
                # Best-effort copy: warn and continue with remaining files.
                sys.stderr.write("WARNING: failed to copy %s\n" % orig_path)
def get_simulant_creator(self) -> Callable[[int, Union[Mapping[str, Any], None]], pd.Index]:
    """Grabs a reference to the function that creates new simulants (adds
    rows to the state table).

    Returns
    -------
    Callable
        The simulant creator function. The creator function takes the number
        of simulants to be created as its first argument and a dict or other
        mapping of population configuration that will be available to
        simulant initializers as its second argument. It generates the new
        rows in the population state table and then calls each initializer
        registered with the population system with a data object containing
        the state table index of the new simulants, the configuration info
        passed to the creator, the current simulation time, and the size of
        the next time step.
    """
    # Thin delegation: the population manager owns the actual creator.
    return self._population_manager.get_simulant_creator()
def set_file_params(self, reopen_on_reload=None, trucate_on_statup=None, max_size=None, rotation_fname=None, touch_reopen=None, touch_rotate=None, owner=None, mode=None):
    """Set various parameters related to file logging.

    :param bool reopen_on_reload: Reopen log after reload.
    :param bool trucate_on_statup: Truncate log on startup.
        (Parameter name typo kept for backward compatibility.)
    :param int max_size: Set maximum logfile size in bytes after which log
        should be rotated.
    :param str|unicode rotation_fname: Set log file name after rotation.
    :param str|unicode|list touch_reopen: Trigger log reopen if the specified
        file is modified/touched.

        .. note:: This can be set to a file touched by the ``postrotate``
            script of ``logrotate`` to implement rotation.

    :param str|unicode|list touch_rotate: Trigger log rotation if the
        specified file is modified/touched.
    :param str|unicode owner: Set owner chown() for logs.
    :param str|unicode mode: Set mode chmod() for logs.
    """
    self._set('log-reopen', reopen_on_reload, cast=bool)
    self._set('log-truncate', trucate_on_statup, cast=bool)
    self._set('log-maxsize', max_size)
    self._set('log-backupname', rotation_fname)
    self._set('touch-logreopen', touch_reopen, multi=True)
    self._set('touch-logrotate', touch_rotate, multi=True)
    self._set('logfile-chown', owner)
    self._set('logfile-chmod', mode)
    # Return the section to support fluent chaining.
    return self._section
def main():
    """Entry point for console script jp2dump.

    Parses command-line options, adjusts the library's print/parse options
    accordingly, and prints the metadata of the given JPEG2000 file.
    """
    kwargs = {'description': 'Print JPEG2000 metadata.', 'formatter_class': argparse.ArgumentDefaultsHelpFormatter}
    parser = argparse.ArgumentParser(**kwargs)
    parser.add_argument('-x', '--noxml', help='suppress XML', action='store_true')
    parser.add_argument('-s', '--short', help='only print box id, offset, and length', action='store_true')
    chelp = 'Level of codestream information. 0 suppresses all details, '
    chelp += '1 prints the main header, 2 prints the full codestream.'
    parser.add_argument('-c', '--codestream', help=chelp, metavar='LEVEL', nargs=1, type=int, default=[1])
    parser.add_argument('filename')
    args = parser.parse_args()
    if args.noxml:
        set_option('print.xml', False)
    if args.short:
        set_option('print.short', True)
    codestream_level = args.codestream[0]
    if codestream_level not in [0, 1, 2]:
        raise ValueError("Invalid level of codestream information specified.")
    if codestream_level == 0:
        set_option('print.codestream', False)
    elif codestream_level == 2:
        set_option('parse.full_codestream', True)
    filename = args.filename
    # JP2 metadata can be extensive, so don't print any warnings until we
    # are done with the metadata.
    with warnings.catch_warnings(record=True) as wctx:
        jp2 = Jp2k(filename)
        if jp2._codec_format == lib.openjp2.CODEC_J2K:
            # Raw codestream: output is driven solely by the codestream level.
            if codestream_level == 0:
                print('File: {0}'.format(os.path.basename(filename)))
            elif codestream_level == 1:
                print(jp2)
            elif codestream_level == 2:
                print('File: {0}'.format(os.path.basename(filename)))
                print(jp2.get_codestream(header_only=False))
        else:
            print(jp2)
    # Now re-emit any suppressed warnings.
    if len(wctx) > 0:
        print("\n")
        for warning in wctx:
            print("{0}:{1}: {2}: {3}".format(warning.filename, warning.lineno, warning.category.__name__, warning.message))
def delete_image_tar ( file_obj , tar ) :
'''delete image tar will close a file object ( if extracted into
memory ) or delete from the file system ( if saved to disk )''' | try :
file_obj . close ( )
except :
tar . close ( )
if os . path . exists ( file_obj ) :
os . remove ( file_obj )
deleted = True
bot . debug ( 'Deleted temporary tar.' )
return deleted |
def GetFileObjectReferenceCount(self, path_spec):
    """Retrieves the reference count of a cached file-like object.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      int: reference count or None if there is no file-like object for
          the corresponding path specification cached.
    """
    cache_value = self._file_object_cache.GetCacheValue(path_spec.comparable)
    if cache_value:
        return cache_value.reference_count
    return None
def get_live_league_games(self):
    """Return a dictionary containing a list of league games in progress.

    :return: dictionary of live games, see :doc:`responses </responses>`
    """
    url = self.__build_url(urls.GET_LIVE_LEAGUE_GAMES)
    result = self.executor(url)
    if self.logger:
        self.logger.info('URL: {0}'.format(url))
    # On an HTTP error nothing is returned (implicitly None).
    if not self.__check_http_err(result.status_code):
        return response.build(result, url, self.raw_mode)
def configure_roles_on_host(api, host):
    """Go through all the roles on this host and configure those whose role
    type is one we care about (DATANODE / TASKTRACKER / REGIONSERVER)."""
    # Dispatch table of the role types we know how to configure.
    conf_by_type = {
        'DATANODE': DATANODE_CONF,
        'TASKTRACKER': TASKTRACKER_CONF,
        'REGIONSERVER': REGIONSERVER_CONF,
    }
    for role_ref in host.roleRefs:
        # Mgmt service/role has no cluster name. Skip over those.
        cluster_name = role_ref.get('clusterName')
        if cluster_name is None:
            continue
        # Resolve the actual role object so we can inspect its type.
        service = api.get_cluster(cluster_name).get_service(role_ref['serviceName'])
        role = service.get_role(role_ref['roleName'])
        LOG.debug("Evaluating %s (%s)" % (role.name, host.hostname))
        config = conf_by_type.get(role.type)
        if config is None:
            continue
        # Set the config
        LOG.info("Configuring %s (%s)" % (role.name, host.hostname))
        role.update_config(config)
def get_searches(self, quick=False, saved=True):
    '''Get searches listing.

    :param quick bool: Include quick searches (default False)
    :param saved bool: Include saved searches (default True)
    :returns: :py:class:`planet.api.models.Searches`
    :raises planet.api.exceptions.APIException: On API error.
    '''
    # quick wins over saved-only; when both flags agree with their defaults
    # off/on the server-side filter narrows to one search type.
    params = {}
    if quick:
        params['search_type'] = 'quick'
    elif saved:
        params['search_type'] = 'saved'
    return self._get(self._url('data/v1/searches/'),
                     body_type=models.Searches,
                     params=params).get_body()
def identifier(self, camelsplit=False, ascii=True):
    """Return a python identifier from the string (underscore separators)."""
    # Delegate to nameify, fixing the separator to '_'.
    return self.nameify(camelsplit=camelsplit, ascii=ascii, sep='_')
def download(self, data, block_num=-1):
    """Download a DB data block into the AG.

    A whole block (including header and footer) must be available in the
    user buffer.

    :param data: the user buffer (bytes-like)
    :param block_num: new block number (or -1)
    :return: library result code from ``Cli_Download``
    """
    size = len(data)
    # Copy the Python buffer into a ctypes byte array of the same length.
    buffer_ = (c_byte * size).from_buffer_copy(data)
    return self.library.Cli_Download(self.pointer, block_num, byref(buffer_), size)
def create_dynamic(cls, name, interface_id, dynamic_index=1, reverse_connection=True,
                   automatic_default_route=True, domain_server_address=None,
                   loopback_ndi='127.0.0.1', location_ref=None, log_server_ref=None,
                   zone_ref=None, enable_gti=False, enable_antivirus=False,
                   sidewinder_proxy_enabled=False, default_nat=False, comment=None, **kw):
    """Create a single layer 3 firewall with only a single DHCP interface. Useful
    when creating virtualized FW's such as in Microsoft Azure.

    :param str name: name of engine
    :param str,int interface_id: interface ID used for dynamic interface and management
    :param int dynamic_index: DHCP index assigned to the dynamic interface (default: 1)
    :param bool reverse_connection: specifies the dynamic interface should initiate
        connections to management (default: True)
    :param bool automatic_default_route: allow SMC to create a dynamic netlink for the
        default route (default: True)
    :param list domain_server_address: list of IP addresses for engine DNS
    :param str loopback_ndi: IP address for a loopback NDI. When creating a dynamic
        engine, the `auth_request` must be set to a different interface, so a
        loopback is created
    :param str location_ref: location by name for the engine
    :param str log_server_ref: log server reference
    :param str zone_ref: zone reference applied to the dynamic interface
    :param bool enable_gti: enable GTI file reputation (default: False)
    :param bool enable_antivirus: enable antivirus (default: False)
    :param bool sidewinder_proxy_enabled: enable Sidewinder proxy (default: False)
    :param bool default_nat: enable default NAT (default: False)
    :param str comment: optional comment for the engine
    """
    # Extra interfaces may be provided by the caller through kw.
    interfaces = kw.pop('interfaces', [])
    # Add the primary (management) DHCP interface to the interface list
    interface = {'interface_id': interface_id,
                 'interface': 'single_node_interface',
                 'zone_ref': zone_ref,
                 'interfaces': [{'nodes': [{'dynamic': True,
                                            'dynamic_index': dynamic_index,
                                            'nodeid': 1,
                                            'reverse_connection': reverse_connection,
                                            'automatic_default_route': automatic_default_route}]}]}
    interfaces.append(interface)
    # auth_request cannot live on the dynamic interface, so bind it to a loopback NDI.
    loopback = LoopbackInterface.create(address=loopback_ndi, nodeid=1, auth_request=True, rank=1)
    return Layer3Firewall.create_bulk(name=name, node_type='firewall_node',
                                      primary_mgt=interface_id, interfaces=interfaces,
                                      loopback_ndi=[loopback.data],
                                      domain_server_address=domain_server_address,
                                      log_server_ref=log_server_ref, nodes=1,
                                      enable_gti=enable_gti, enable_antivirus=enable_antivirus,
                                      sidewinder_proxy_enabled=sidewinder_proxy_enabled,
                                      default_nat=default_nat, location_ref=location_ref,
                                      comment=comment, **kw)
def isscalar(cls, dataset, dim):
    """Tests if dimension is scalar in each subpath."""
    if not dataset.data:
        return True
    # Reuse one template dataset, swapping in each subpath's data.
    template = cls._inner_dataset_template(dataset)
    for subpath in dataset.data:
        template.data = subpath
        if not template.interface.isscalar(template, dim):
            return False
    return True
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    """Convert a GeoJSON polygon feature to a numpy array.

    Args:
        feature (pygeoj.Feature): polygon feature to draw
        shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in
        lat_idx (func): function converting a latitude to the (fractional) row index in the map
        lon_idx (func): function converting a longitude to the (fractional) column index in the map

    Returns:
        np.array: mask, background is zero, foreground is one

    Raises:
        ValueError: if the feature is not a Polygon or MultiPolygon.
    """
    import matplotlib
    # specify 'agg' renderer, Mac renderer does not support what we want to do below
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np
    # we can only do polygons right now
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)
    # fictional dpi - don't matter in the end
    dpi = 100
    # -- start documentation include: poly-setup
    # make a new figure with no frame, no axes, with the correct size, black background
    fig = plt.figure(frameon=False, dpi=dpi, )
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    # noinspection PyTypeChecker
    ax.set_xlim([0, shape[1]])
    # noinspection PyTypeChecker
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    # -- end documentation include: poly-setup
    # for normal polygons make coordinates iterable
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates
    for poly_coords in coords:
        # the polygon may contain multiple outlines; the first is
        # always the outer one, the others are 'holes'
        for i, outline in enumerate(poly_coords):
            # inside/outside fill value: figure background is white by
            # default, draw inverted polygon and invert again later
            value = 0. if i == 0 else 1.
            # convert lats/lons to row/column indices in the array
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])
            # draw the polygon
            poly = patches.Polygon(list(zip(xs, ys)), facecolor=(value, value, value),
                                   edgecolor='none', antialiased=True)
            ax.add_patch(poly)
    # -- start documentation include: poly-extract
    # extract the figure to a numpy array
    fig.canvas.draw()
    # BUG FIX: np.fromstring(..., sep='') is deprecated (removed in modern
    # NumPy); np.frombuffer is the supported zero-copy equivalent.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # reshape to a proper numpy array, keep one channel only
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    # -- end documentation include: poly-extract
    # make sure we get the right shape back
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]
    # convert from uints back to floats and invert to get black background
    data = 1. - data.astype(float) / 255.
    # image is flipped horizontally w.r.t. map
    data = data[::-1, :]
    # done, clean up
    plt.close('all')
    return data
def readline(self, size=-1):
    "The size is ignored since a complete line must be read."
    raw = self.fin.readline()
    # An empty string from the underlying file signals EOF.
    return self.process_line(raw.rstrip('\n')) if raw else ''
def _finalize(self):
    """Dump traces using cPickle.

    Collects every trace plus the sampler state into one container dict and
    pickles it to ``self.filename``. AttributeError (e.g. a missing
    ``_trace``/``_state_``) is silently ignored, preserving the original
    best-effort behavior.
    """
    try:
        # Gather all traces and the sampler state into a single container.
        container = {name: self._traces[name]._trace for name in self._traces}
        container['_state_'] = self._state_
        # BUG FIX: use a context manager so the file handle is closed even if
        # pickling raises (the original leaked it); also avoids shadowing the
        # builtin name `file`.
        with open(self.filename, 'w+b') as dump_file:
            std_pickle.dump(container, dump_file)
    except AttributeError:
        pass
def create_role_config_group(resource_root, service_name, name, display_name,
                             role_type, cluster_name="default"):
    """Create a role config group.

    @param resource_root: The root Resource object.
    @param service_name: Service name.
    @param name: The name of the new group.
    @param display_name: The display name of the new group.
    @param role_type: The role type of the new group.
    @param cluster_name: Cluster name.
    @return: The created role config group.
    """
    # Delegate to the bulk call with a single-element list and unwrap.
    group = ApiRoleConfigGroup(resource_root, name, display_name, role_type)
    created = create_role_config_groups(resource_root, service_name, [group], cluster_name)
    return created[0]
def rpc_get_completions(self, filename, source, offset):
    """Get a list of completion candidates for the symbol at offset."""
    candidates = self._call_backend("rpc_get_completions", [], filename,
                                    get_source(source), offset)
    # Uniquify by name (later duplicates win), then sort for stable output.
    unique = {cand['name']: cand for cand in candidates}
    return sorted(unique.values(), key=lambda cand: _pysymbol_key(cand["name"]))
def summary(*samples):
    """Run SignatureCompareRelatedSimple module from qsignature tool.

    Creates a matrix of pairwise comparison among samples. The
    function will not run if the output exists.

    :param samples: list with only one element containing all samples information
    :returns: (dict) with the path of the output to be joined to summary
    """
    warnings, similar = [], []
    qsig = config_utils.get_program("qsignature", samples[0][0]["config"])
    # Nothing to do when the qsignature program is not configured.
    if not qsig:
        return [[]]
    res_qsig = config_utils.get_resources("qsignature", samples[0][0]["config"])
    jvm_opts = " ".join(res_qsig.get("jvm_opts", ["-Xms750m", "-Xmx8g"]))
    work_dir = samples[0][0]["dirs"]["work"]
    count = 0
    # Symlink every per-sample qsignature VCF into one directory so the
    # qsignature JAR can compare them pairwise.
    for data in samples:
        data = data[0]
        vcf = tz.get_in(["summary", "qc", "qsignature", "base"], data)
        if vcf:
            count += 1
            vcf_name = dd.get_sample_name(data) + ".qsig.vcf"
            out_dir = utils.safe_makedir(os.path.join(work_dir, "qsignature"))
            if not os.path.lexists(os.path.join(out_dir, vcf_name)):
                os.symlink(vcf, os.path.join(out_dir, vcf_name))
    if count > 0:
        qc_out_dir = utils.safe_makedir(os.path.join(work_dir, "qc", "qsignature"))
        out_file = os.path.join(qc_out_dir, "qsignature.xml")
        out_ma_file = os.path.join(qc_out_dir, "qsignature.ma")
        out_warn_file = os.path.join(qc_out_dir, "qsignature.warnings")
        log = os.path.join(work_dir, "qsignature", "qsig-summary.log")
        # Skip the expensive comparison when output already exists.
        if not os.path.exists(out_file):
            with file_transaction(samples[0][0], out_file) as file_txt_out:
                # NOTE: format(**locals()) picks up qsig, jvm_opts, log,
                # out_dir (from the loop above) and file_txt_out.
                base_cmd = ("{qsig} {jvm_opts} "
                            "org.qcmg.sig.SignatureCompareRelatedSimple "
                            "-log {log} -dir {out_dir} "
                            "-o {file_txt_out} ")
                do.run(base_cmd.format(**locals()), "qsignature score calculation")
        error, warnings, similar = _parse_qsignature_output(out_file, out_ma_file,
                                                            out_warn_file, samples[0][0])
        return [{'total samples': count,
                 'similar samples pairs': len(similar),
                 'warnings samples pairs': len(warnings),
                 'error samples': list(error),
                 'out_dir': qc_out_dir}]
    else:
        return []
def _get_mod_ns(name, fullname, includeprivate):
    """Return the template context of module identified by `fullname` as a
    dict."""
    # _get_members returns a pair; index 1 selects the lists that include
    # private names, index 0 the public-only lists.
    idx = 1 if includeprivate else 0
    mod = importlib.import_module(fullname)
    return {
        'name': name,
        'fullname': fullname,
        'members': _get_members(mod)[idx],
        'functions': _get_members(mod, typ='function')[idx],
        'classes': _get_members(mod, typ='class')[idx],
        'exceptions': _get_members(mod, typ='exception')[idx],
        'data': _get_members(mod, typ='data')[idx],
        'subpackages': [],
        'submodules': [],
        'doc': mod.__doc__,
    }
def set_selected_submission(self, course, task, submissionid):
    """Set submission whose id is `submissionid` to selected grading submission
    for the given course/task.

    Returns a boolean indicating whether the operation was successful or not.
    """
    submission = self.submission_manager.get_submission(submissionid)
    # Do not continue if submission does not exist or is not owned by current user
    if not submission:
        return False
    # Check if the submission is from this task/course!
    if submission["taskid"] != task.get_id() or submission["courseid"] != course.get_id():
        return False
    is_staff = self.user_manager.has_staff_rights_on_course(course, self.user_manager.session_username())
    # Do not enable submission selection after deadline
    if not task.get_accessible_time().is_open() and not is_staff:
        return False
    # Only allow to set submission if the student must choose their best submission themselves
    if task.get_evaluate() != 'student' and not is_staff:
        return False
    # Check if task is done per group/team
    if task.is_group_task() and not is_staff:
        # Find the aggregation containing the current user and keep only the
        # matching group element ($elemMatch projection).
        group = self.database.aggregations.find_one(
            {"courseid": task.get_course_id(),
             "groups.students": self.user_manager.session_username()},
            {"groups": {"$elemMatch": {"students": self.user_manager.session_username()}}})
        students = group["groups"][0]["students"]
    else:
        students = [self.user_manager.session_username()]
    # Check if group/team is the same as the one that produced the submission.
    if students == submission["username"]:
        self.database.user_tasks.update_many(
            {"courseid": task.get_course_id(), "taskid": task.get_id(),
             "username": {"$in": students}},
            {"$set": {"submissionid": submission['_id'],
                      "grade": submission['grade'],
                      "succeeded": submission["result"] == "success"}})
        return True
    else:
        return False
def list_metadata(self, resource):
    """List all keys associated with the given resource.

    Args:
        resource (intern.resource.boss.BossResource)

    Returns:
        (list)

    Raises:
        requests.HTTPError on a failure.
    """
    # Authenticate the metadata service before issuing the request.
    service = self.metadata_service
    service.set_auth(self._token_metadata)
    return service.list(resource)
def parse_JSON(self, JSON_string):
    """Parse a *pyowm.stationsapi30.measurement.AggregatedMeasurement*
    instance out of raw JSON data.

    :param JSON_string: a raw JSON string
    :type JSON_string: str
    :return: a *pyowm.stationsapi30.measurement.AggregatedMeasurement*
        instance or ``None`` if no data is available
    :raises: *ParseResponseError* if it is impossible to find or parse the
        data needed to build the result
    """
    if JSON_string is None:
        raise parse_response_error.ParseResponseError('JSON data is None')
    payload = json.loads(JSON_string)
    # The timestamp comes back under 'date' and must be an int when present.
    timestamp = payload.get('date', None)
    if timestamp is not None:
        timestamp = int(timestamp)
    return AggregatedMeasurement(
        payload.get('station_id', None),
        timestamp,
        payload.get('type', None),
        temp=payload.get('temp', dict()),
        humidity=payload.get('humidity', dict()),
        wind=payload.get('wind', dict()),
        pressure=payload.get('pressure', dict()),
        precipitation=payload.get('precipitation', dict()))
def get_disk_quota(username, machine_name=None):
    """Returns disk quota for username in KB.

    NOTE(review): machine_name is accepted but unused — confirm intended.
    Returns the string 'Account not found' for a missing account and False
    when no quota is set (preserved from the original behavior).
    """
    try:
        account = Account.objects.get(username=username, date_deleted__isnull=True)
    except Account.DoesNotExist:
        return 'Account not found'
    quota = account.get_disk_quota()
    if quota is None:
        return False
    # Scale by 1048576 (1024**2) to express the quota in KB — presumably the
    # stored value is in GB; confirm against Account.get_disk_quota.
    return quota * 1048576
def _safe_db(num, den):
    r"""Properly handle the potential +Inf db SIR instead of raising a
    RuntimeWarning."""
    # A zero denominator maps to +Inf dB rather than a divide warning.
    return np.inf if den == 0 else 10 * np.log10(num / den)
def find_or_build(cls, **kwargs):
    """Checks if an instance already exists in db with these kwargs else
    returns a new, saved instance of the service's model class.

    Args:
        **kwargs: instance parameters ('keys' selects the lookup fields)
    """
    # 'keys' narrows which kwargs participate in the existence lookup.
    keys = kwargs.pop('keys', [])
    existing = cls.first(**subdict(kwargs, keys))
    return existing or cls.build(**kwargs)
def list(self, **params):
    """Retrieve all deal unqualified reasons.

    Returns all deal unqualified reasons available to the user according to
    the parameters provided.

    :calls: ``get /deal_unqualified_reasons``
    :param dict params: (optional) Search options.
    :return: List of dictionaries that support attribute-style access, which
        represent collection of DealUnqualifiedReasons.
    :rtype: list
    """
    # Only the parsed body (third element of the response tuple) is needed.
    _, _, reasons = self.http_client.get("/deal_unqualified_reasons", params=params)
    return reasons
def _get_msge_with_gradient(data, delta, xvschema, skipstep, p):
    """Calculate mean squared generalization error and its gradient,
    automatically selecting the best function."""
    t, m, l = data.shape
    # The system is underdetermined when there are fewer equations
    # ((l - p) * t) than unknowns (m * p).
    if (l - p) * t < m * p:
        return _msge_with_gradient_underdetermined(data, delta, xvschema, skipstep, p)
    return _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p)
def _validate(self):
    """Checks whether all the attributes of this object is valid."""
    # region_type must be one of the region types known to this package.
    if self.region_type not in regions_attributes:
        raise ValueError(
            "'{0}' is not a valid region type in this package".format(self.region_type))
    # The coordinate frame must be valid for either the DS9 or CRTF dialect.
    if self.coordsys not in valid_coordsys['DS9'] + valid_coordsys['CRTF']:
        raise ValueError(
            "'{0}' is not a valid coordinate reference frame "
            "in astropy".format(self.coordsys))
def check_code(self, card_id, codes):
    """Check codes for the given card via the card/code/checkcode endpoint."""
    payload = {'card_id': card_id, 'code': codes}
    return self._post('card/code/checkcode', data=payload)
def merge(self, other):  # type: (TentativeType) -> None
    """Merge two TentativeType instances."""
    # Hashable and non-hashable members are tracked separately; add both.
    for member in other.types_hashable:
        self.add(member)
    for member in other.types:
        self.add(member)
def tone_marks():
    """Keep tone-modifying punctuation by matching following character.

    Assumes the `tone_marks` pre-processor was run for cases where there might
    not be any space after a tone-modifying punctuation mark.
    """
    # Lookbehind on each tone mark keeps the mark attached to what follows.
    builder = RegexBuilder(
        pattern_args=symbols.TONE_MARKS,
        pattern_func=lambda mark: u"(?<={}).".format(mark))
    return builder.regex
def hide_columns(self, subset):
    """Hide columns from rendering.

    .. versionadded:: 0.23.0

    Parameters
    ----------
    subset : IndexSlice
        An argument to ``DataFrame.loc`` that identifies which columns
        are hidden.

    Returns
    -------
    self : Styler
    """
    # Record the integer positions of the selected columns for the renderer.
    selection = self.data.loc[_non_reducing_slice(subset)]
    self.hidden_columns = self.columns.get_indexer_for(selection.columns)
    return self
def next(self):
    """Return the next available message

    Blocks indefinitely unless consumer_timeout_ms > 0

    Returns:
        a single KafkaMessage from the message iterator

    Raises:
        ConsumerTimeout after consumer_timeout_ms and no message

    Note:
        This is also the method called internally during iteration
    """
    # Record when waiting started so the timeout check below can fire.
    self._set_consumer_timeout_start()
    while True:
        try:
            return six.next(self._get_message_iterator())
        # Handle batch completion
        except StopIteration:
            # Current batch exhausted: rebuild the iterator, then let
            # _check_consumer_timeout raise ConsumerTimeout if the
            # configured wait has been exceeded before retrying.
            self._reset_message_iterator()
            self._check_consumer_timeout()
def open_keyword_cache_path(self):
    """Open File dialog to choose the keyword cache path."""
    # noinspection PyCallByClass,PyTypeChecker
    file_name, __ = QFileDialog.getSaveFileName(
        self,
        self.tr('Set keyword cache file'),
        self.leKeywordCachePath.text(),
        self.tr('Sqlite DB File (*.db)'))
    # An empty name means the dialog was cancelled; leave the field alone.
    if file_name:
        self.leKeywordCachePath.setText(file_name)
def request(self, method, url, params=None, data=None, headers=None, cookies=None,
            files=None, auth=None, timeout=None, allow_redirects=True, proxies=None,
            hooks=None, stream=None, verify=None, cert=None, json=None):
    """Constructs a :class:`Request <Request>`, prepares it and sends it.
    Returns :class:`Response <Response>` object.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query
        string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) json to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the
        :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the
        :class:`Request`.
    :param files: (optional) Dictionary of ``'filename': file-like-objects``
        for multipart encoding upload.
    :param auth: (optional) Auth tuple or callable to enable
        Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Set to True by default.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol or protocol and
        hostname to the URL of the proxy.
    :param stream: (optional) whether to immediately download the response
        content. Defaults to ``False``.
    :param verify: (optional) Either a boolean, in which case it controls
        whether we verify the server's TLS certificate, or a string, in which
        case it must be a path to a CA bundle to use. Defaults to ``True``.
    :param cert: (optional) if String, path to ssl client cert file (.pem).
        If Tuple, ('cert', 'key') pair.
    :rtype: requests.Response
    """
    # Build and prepare the Request.
    req = Request(
        method=method.upper(),
        url=url,
        headers=headers,
        files=files,
        data=data or {},
        json=json,
        params=params or {},
        auth=auth,
        cookies=cookies,
        hooks=hooks)
    prepared = self.prepare_request(req)
    # Merge environment settings (proxies, stream, verify, cert) with the
    # explicit arguments, then send.
    settings = self.merge_environment_settings(
        prepared.url, proxies or {}, stream, verify, cert)
    send_kwargs = {'timeout': timeout, 'allow_redirects': allow_redirects}
    send_kwargs.update(settings)
    return self.send(prepared, **send_kwargs)
def cleanup():
    """Deletes launch configs and auto scaling group."""
    try:
        # Build the controller from a fresh config and run its cleanup.
        CloudController(CloudConfig()).cleanup()
    except CloudComposeException as ex:
        # Errors are reported to stdout rather than propagated.
        print(ex)
def upload_scripts(client, script_dir, overwrite=True):
    """Uploads general-purpose scripts to a Google Storage bucket.

    ``script_dir`` is a bucket location matched by ``_BUCKET_PAT`` (bucket
    name + prefix). Every file under the packaged
    ``data/gcloud/scripts`` directory is uploaded beneath that prefix,
    preserving the relative directory layout.
    """
    local_dir = os.path.join(genometools._root, 'data', 'gcloud', 'scripts')
    match = _BUCKET_PAT.match(script_dir)
    script_bucket = match.group(1)
    script_prefix = match.group(2)
    # Depth of local_dir lets us derive each file's path relative to it.
    base_depth = len(local_dir.split(os.sep))
    for root, _dirs, files in os.walk(local_dir):
        rel_path = '/'.join(root.split(os.sep)[base_depth:])
        for fname in files:
            local_path = os.path.join(root, fname)
            parts = [script_prefix, rel_path, fname] if rel_path else [script_prefix, fname]
            remote_path = '/'.join(parts)
            _LOGGER.info('Uploading "%s"...', remote_path)
            storage.upload_file(client, script_bucket, local_path, remote_path,
                                overwrite=overwrite)
def walk(patterns, dirname):
    """Like #os.walk(), but filters the files and directories that are excluded
    by the specified *patterns*.

    # Arguments
    patterns (IgnoreList, IgnoreListCollection): Can also be any object that
      implements the #IgnoreList.match() interface.
    dirname (str): The directory to walk.
    """
    for root, dirs, files in os.walk(dirname, topdown=True):
        # Prune `dirs` in place so os.walk() does not descend into ignored
        # directories (topdown=True makes this effective).
        dirs[:] = [name for name in dirs
                   if patterns.match(os.path.join(root, name), True) != MATCH_IGNORE]
        files[:] = [name for name in files
                    if patterns.match(os.path.join(root, name), False) != MATCH_IGNORE]
        yield root, dirs, files
def _load_service_containers(self, service, configs, use_cache):
    """Ensure ``service`` has a registered container, loading its
    dependencies and cargo first when they are missing.

    :param service: Service instance to load containers for.
    :raises TypeError: when ``service`` is not a Service instance.
    """
    if not isinstance(service, Service):
        raise TypeError("service must of an instance of Service")
    # Guard clause: nothing to do when containers are already loaded.
    if service.containers:
        return
    container_name = self._container_registration(service.alias)
    if service.dependencies:
        self._load_dependency_containers(service)
    if not service.cargo:
        self._load_service_cargo(service, configs, use_cache)
    self._update_container_host_config(service)
    service.containers[container_name] = Container(
        self._client_session, container_name, service.cargo.id,
        container_config=service.container_config.to_dict(),
        host_config=service.host_config.to_dict())
def get_elms(self, id_=None, class_name=None, name=None, tag_name=None, text=None,
             xpath=None, parent_id=None, parent_class_name=None, parent_name=None,
             parent_tag_name=None, css_selector=None):
    """Shortcut for :py:meth:`find_element* <selenium.webdriver.remote.webelement.WebElement.find_element>`
    methods. It's shorter and you can quickly find element in element.

    .. code-block:: python

        elm = driver.find_element_by_id('someid')
        elm.find_elements_by_class_name('someclasss')
        # vs.
        elm = driver.get_elm(parent_id='someid', class_name='someclass')

    .. versionchanged:: 2.8
        Added ``text`` param. Use it instead of old ``find_element[s]_by_text`` methods.
        Thanks to that it can be used also in ``wait_for_element*`` methods.
    """
    # Resolve the parent element first (when any parent_* selector was given),
    # otherwise search from self.
    if parent_id or parent_class_name or parent_name or parent_tag_name:
        parent = self.get_elm(parent_id, parent_class_name, parent_name, parent_tag_name)
    else:
        parent = self
    # NOTE(review): this exclusivity check omits `name` and `css_selector`,
    # so e.g. passing name together with id_ is not rejected — confirm intended.
    if len([x for x in (id_, class_name, tag_name, text, xpath) if x is not None]) > 1:
        raise Exception('You can find element only by one param.')
    if id_ is not None:
        return parent.find_elements_by_id(id_)
    if class_name is not None:
        return parent.find_elements_by_class_name(class_name)
    if name is not None:
        return parent.find_elements_by_name(name)
    if tag_name is not None:
        return parent.find_elements_by_tag_name(tag_name)
    if text is not None:
        # Text search is implemented as an XPath lookup, skipping subtrees
        # flagged with the data-selenium-not-search attribute.
        xpath = './/*/text()[contains(., "{}") and not(ancestor-or-self::*[@data-selenium-not-search])]/..'.format(text)
    if xpath is not None:
        return parent.find_elements_by_xpath(xpath)
    if css_selector is not None:
        return parent.find_elements_by_css_selector(css_selector)
    # NOTE(review): message mentions "click" although this method only finds
    # elements — likely copied from a click helper; confirm before changing.
    raise Exception('You must specify id or name of element on which you want to click.')
def render(self):
    """Render html page and atom feed."""
    context = GLOBAL_TEMPLATE_CONTEXT.copy()
    context['tag'] = self
    # Newest entries first.
    context['entries'] = sorted(self.entries,
                                key=operator.attrgetter('date'), reverse=True)
    # Render the HTML index page for this tag.
    render_to = os.path.join(CONFIG['output_to'], 'tags', self.slug)
    if not os.path.exists(render_to):  # pragma: no coverage
        os.makedirs(render_to)
    _render(context, 'tag_index.html', os.path.join(render_to, 'index.html'))
    # Render the Atom feed with only the 10 most recent entries.
    context['entries'] = context['entries'][:10]
    context['last_build'] = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
    _render(context, 'atom.xml', os.path.join(render_to, 'atom.xml'))
    return True
def description(cls):
    """Return a field -> data type dictionary describing this model
    as reported by the database.

    :rtype: dict
    """
    result = {}
    for column in cls.__table__.columns:  # pylint: disable=no-member
        type_text = str(column.type)
        # Non-nullable columns are flagged as required.
        if not column.nullable:
            type_text += ' (required)'
        result[column.name] = type_text
    return result
def is_empty(self):
    """Check whether this interval is empty.

    :rtype: bool
    """
    lower, upper = self.bounds
    if upper < lower:
        return True
    if upper == lower:
        # A degenerate interval [a, a] is non-empty only when both
        # endpoints are included.
        return not (self.included[0] and self.included[1])
    # BUG FIX: the original fell through and implicitly returned None here;
    # a proper interval (upper > lower) is never empty, so return False.
    return False
def __instances(self):
    """Cache instances, allowing generators to be used and reused.

    This fills a cache as the generator gets emptied, eventually
    reading exclusively from the cache.
    """
    # First serve everything already cached ...
    yield from self.__instances_cache
    # ... then drain the original generator, caching as we go.
    for instance in self.__instances_original:
        self.__instances_cache.append(instance)
        yield instance
def known_remotes(self):
    """The names of the configured remote repositories (a list of
    :class:`.Remote` objects)."""
    remotes = []
    output = self.context.capture('bzr', 'config', 'parent_location',
                                  check=False, silent=True)
    if output and not output.isspace():
        location = output.strip()
        # The `bzr branch' command has the unusual habit of converting
        # absolute pathnames into relative pathnames; normalize back to an
        # absolute path so callers get an unambiguous value.
        if location.startswith('../'):
            location = os.path.normpath(os.path.join(self.local, location))
        remotes.append(Remote(default=True, location=location,
                              repository=self, roles=['push', 'pull']))
    return remotes
def get_config():
    """Return configuration for current session.

    When called for the first time, this will create a config object, using
    whatever is the default load path to find the config yaml.
    """
    # Fast path: the session already carries a loaded config.
    if session.config is not None:
        logging.info("Using pre-loaded object: {}".format(session.config))
        return session.config
    path = session.default_config_path
    if os.path.isfile(path):
        logging.info("LOADING FROM: {}".format(path))
        session.config = load_config(path)
    else:
        session.config = Config()
        logging.info("using default session: {}, path does not exist: {}".format(session, path))
    return session.config
def list_edges(self, *, axis: int) -> List[str]:
    """**DEPRECATED** - Use `ds.row_graphs.keys()` or `ds.col_graphs.keys()` instead"""
    deprecated("'list_edges' is deprecated. Use 'ds.row_graphs.keys()' or 'ds.col_graphs.keys()' instead")
    # Fix: the signature promises List[str], but dict .keys() returns a
    # KeysView; materialize it so the declared contract actually holds.
    if axis == 0:
        return list(self.row_graphs.keys())
    elif axis == 1:
        return list(self.col_graphs.keys())
    else:
        # Unknown axis: nothing to list.
        return []
def cnst_A1T(self, Y1):
    r"""Compute :math:`A_1^T \mathbf{y}_1` component of :math:`A^T \mathbf{y}`.

    In this case :math:`A_1^T \mathbf{y}_1 =
    (\Gamma_0^T \;\; \Gamma_1^T \;\; \ldots) \mathbf{y}_1`.
    """
    # Apply the adjoint operator in the DFT domain (multiplication by the
    # conjugate spectrum), then transform back to the spatial domain.
    spectrum = sl.rfftn(Y1, None, axes=self.cri.axisN)
    return sl.irfftn(np.conj(self.GDf) * spectrum, self.cri.Nv, self.cri.axisN)
def get_nodes_by_source(self, graph, source_full_name):
    """yields nodes from graph are the specified source."""
    # A selector is either `source_name` or `source_name.table_name`.
    parts = source_full_name.split('.')
    if len(parts) not in (1, 2):
        msg = ('Invalid source selector value "{}". Sources must be of the '
               'form `${{source_name}}` or '
               '`${{source_name}}.${{target_name}}`').format(source_full_name)
        raise dbt.exceptions.RuntimeException(msg)
    target_source = parts[0]
    target_table = parts[1] if len(parts) == 2 else None
    for node, real_node in self.source_nodes(graph):
        # Match the source name (or wildcard) first, then the table name
        # (missing table selector matches every table).
        if target_source not in (real_node.source_name, SELECTOR_GLOB):
            continue
        if target_table in (None, real_node.name, SELECTOR_GLOB):
            yield node
def list2dict(list_of_options):
    """Transforms a list of 2 element tuples to a dictionary.

    Args:
        list_of_options: iterable of ``(key, value)`` pairs.

    Returns:
        dict mapping each key to its value; on duplicate keys the later
        pair wins, matching the original accumulation loop.
    """
    # The dict constructor accepts an iterable of key/value pairs directly,
    # replacing the manual accumulation loop.
    return dict(list_of_options)
def accept(self):
    """Launch the multi exposure analysis."""
    if not isinstance(self._multi_exposure_if, MultiExposureImpactFunction):
        # This should not happen as the "accept" button must be disabled if
        # the impact function is not ready.
        return ANALYSIS_FAILED_BAD_CODE, None
    # Switch to the progress tab and lock the UI while the analysis runs.
    self.tab_widget.setCurrentIndex(2)
    self.set_enabled_buttons(False)
    enable_busy_cursor()
    try:
        code, message, exposure = self._multi_exposure_if.run()
        message = basestring_to_message(message)
        # Bail out early on failure, reporting the error to the user and
        # restoring the UI state before returning.
        if code == ANALYSIS_FAILED_BAD_INPUT:
            LOGGER.warning(tr(
                'The impact function could not run because of the inputs.'))
            send_error_message(self, message)
            LOGGER.warning(message.to_text())
            disable_busy_cursor()
            self.set_enabled_buttons(True)
            return code, message
        elif code == ANALYSIS_FAILED_BAD_CODE:
            LOGGER.warning(tr(
                'The impact function could not run because of a bug.'))
            LOGGER.exception(message.to_text())
            send_error_message(self, message)
            disable_busy_cursor()
            self.set_enabled_buttons(True)
            return code, message
        if setting('generate_report', True, bool):
            LOGGER.info(
                'Reports are going to be generated for the multiexposure.')
            # Report for the multi exposure
            report = [standard_multi_exposure_impact_report_metadata_html]
            error_code, message = (
                self._multi_exposure_if.generate_report(report))
            message = basestring_to_message(message)
            if error_code == ImpactReport.REPORT_GENERATION_FAILED:
                LOGGER.warning('The impact report could not be generated.')
                send_error_message(self, message)
                LOGGER.exception(message.to_text())
                disable_busy_cursor()
                self.set_enabled_buttons(True)
                return error_code, message
        else:
            LOGGER.warning(
                'Reports are not generated because of your settings.')
            display_warning_message_bar(
                tr('Reports'),
                tr('Reports are not going to be generated because of your '
                   'InaSAFE settings. Check your InaSAFE settings, in the '
                   'advanced panel with the developer mode enabled.'),
                duration=10,
                iface_object=self.iface)
        # We always create the multi exposure group because we need
        # reports to be generated.
        root = QgsProject.instance().layerTreeRoot()
        if len(self.ordered_expected_layers()) == 0:
            # No custom layer ordering requested: build a fresh layer-tree
            # group holding all outputs plus one sub-group per analysis.
            group_analysis = root.insertGroup(0, self._multi_exposure_if.name)
            group_analysis.setItemVisibilityChecked(True)
            group_analysis.setCustomProperty(MULTI_EXPOSURE_ANALYSIS_FLAG, True)
            for layer in self._multi_exposure_if.outputs:
                QgsProject.instance().addMapLayer(layer, False)
                layer_node = group_analysis.addLayer(layer)
                layer_node.setItemVisibilityChecked(False)
                # set layer title if any
                try:
                    title = layer.keywords['title']
                    if qgis_version() >= 21800:
                        layer.setName(title)
                    else:
                        # Older QGIS API uses a different setter name.
                        layer.setLayerName(title)
                except KeyError:
                    pass
            for analysis in self._multi_exposure_if.impact_functions:
                detailed_group = group_analysis.insertGroup(0, analysis.name)
                detailed_group.setItemVisibilityChecked(True)
                add_impact_layers_to_canvas(analysis, group=detailed_group)
            if self.iface:
                self.iface.setActiveLayer(
                    self._multi_exposure_if.analysis_impacted)
        else:
            # Honour the user-defined layer order instead.
            add_layers_to_canvas_with_custom_orders(
                self.ordered_expected_layers(),
                self._multi_exposure_if,
                self.iface)
        if setting('generate_report', True, bool):
            LOGGER.info(
                'Reports are going to be generated for each single '
                'exposure.')
            # Report for the single exposure with hazard
            for analysis in self._multi_exposure_if.impact_functions:
                # we only want to generate non pdf/qpt report
                html_components = [standard_impact_report_metadata_html]
                error_code, message = (
                    analysis.generate_report(html_components))
                message = basestring_to_message(message)
                if error_code == (
                        ImpactReport.REPORT_GENERATION_FAILED):
                    LOGGER.info('The impact report could not be generated.')
                    send_error_message(self, message)
                    LOGGER.info(message.to_text())
                    disable_busy_cursor()
                    self.set_enabled_buttons(True)
                    return error_code, message
        else:
            LOGGER.info(
                'Reports are not generated because of your settings.')
            display_warning_message_bar(
                tr('Reports'),
                tr('Reports are not going to be generated because of your '
                   'InaSAFE settings. Check your InaSAFE settings, in the '
                   'advanced panel with the developer mode enabled.'),
                duration=10,
                iface_object=self.iface)
        # If zoom to impact is enabled
        if setting('setZoomToImpactFlag', expected_type=bool):
            self.iface.zoomToActiveLayer()
        # If hide exposure layers
        if setting('setHideExposureFlag', expected_type=bool):
            treeroot = QgsProject.instance().layerTreeRoot()
            for combo in list(self.combos_exposures.values()):
                layer = layer_from_combo(combo)
                if layer is not None:
                    treelayer = treeroot.findLayer(layer.id())
                    if treelayer:
                        treelayer.setItemVisibilityChecked(False)
        # Set last analysis extent
        self._extent.set_last_analysis_extent(
            self._multi_exposure_if.analysis_extent,
            self._multi_exposure_if.crs)
        self.done(QDialog.Accepted)
    except Exception as e:
        # Catch-all boundary: report unexpected failures to the user.
        error_message = get_error_message(e)
        send_error_message(self, error_message)
        LOGGER.exception(e)
        LOGGER.debug(error_message.to_text())
    finally:
        # Always restore the UI state, whatever happened above.
        disable_busy_cursor()
        self.set_enabled_buttons(True)
def _tzstr ( self , sep = ":" ) :
"""Return formatted timezone offset ( + xx : xx ) or None .""" | off = self . utcoffset ( )
if off is not None :
if off . days < 0 :
sign = "-"
off = - off
else :
sign = "+"
hh , mm = divmod ( off , timedelta ( hours = 1 ) )
assert not mm % timedelta ( minutes = 1 ) , "whole minute"
mm //= timedelta ( minutes = 1 )
assert 0 <= hh < 24
off = "%s%02d%s%02d" % ( sign , hh , sep , mm )
return off |
def send_cmd(cmd, args, ret):
    """Collect and send analytics for CLI command.

    Args:
        args (list): parsed args for the CLI command.
        ret (int): return value of the CLI command.
    """
    from dvc.daemon import daemon
    # Respect the user's opt-out before collecting anything.
    if not Analytics._is_enabled(cmd):
        return
    report = Analytics()
    report.collect_cmd(args, ret)
    # Hand the payload to a background daemon so the CLI can exit promptly.
    daemon(["analytics", report.dump()])
def bitpos(self, key, bit, start=None, end=None):
    """Find first bit set or clear in a string.

    :raises ValueError: if bit is not 0 or 1
    """
    if bit not in (1, 0):
        raise ValueError("bit argument must be either 1 or 0")
    # Redis requires `start` whenever `end` is given, so default a missing
    # start to 0 in that case; omit the range entirely when both are absent.
    if end is not None:
        byte_range = [start if start is not None else 0, end]
    elif start is not None:
        byte_range = [start]
    else:
        byte_range = []
    return self.execute(b'BITPOS', key, bit, *byte_range)
def ascii2h5(bh_dir=None):
    """Convert the Burstein & Heiles (1982) dust map from ASCII to HDF5.

    Args:
        bh_dir (str): directory containing the ``*.ascii`` input files.
            Defaults to ``<data_dir_default>/bh``.

    Writes ``bh.h5`` in the current working directory.
    """
    if bh_dir is None:
        bh_dir = os.path.join(data_dir_default, 'bh')
    fname = os.path.join(bh_dir, '{}.ascii')

    def _store(f, region, shape, n_rows, description):
        """Load one ASCII region, clean it up and store it as a dataset."""
        data = np.loadtxt(fname.format(region), dtype='f4')
        data.shape = shape  # (R, N)
        data = data[:n_rows]  # trailing records are empty
        data[data < -9000] = np.nan  # use NaNs where no data
        dset = f.create_dataset(region, data=data, chunks=True,
                                compression='gzip', compression_opts=3)
        dset.attrs['axes'] = ('R', 'N')
        dset.attrs['description'] = description

    f = h5py.File('bh.h5', 'w')
    # The two HI regions and the two reddening regions only differ in
    # array geometry and description, so they share one code path.
    hi_desc = ('HI 21cm column densities, in units of 10*NHYD. '
               'R = 100 + [(90^o-|b|) sin(l)]/[0.3 degrees]. '
               'N = 100 + [(90^o-|b|) cos (l)]/[0.3 degrees].')
    for region in ('hinorth', 'hisouth'):
        _store(f, region, (210, 201), 201, hi_desc)  # last 9 records empty
    red_desc = ('E(B-V), in units of 0.001 mag. '
                'R = (|b| - 10) / (0.6 degrees). '
                'N = (l + 0.15) / 0.3 - 1.')
    for region in ('rednorth', 'redsouth'):
        _store(f, region, (94, 1200), 93, red_desc)  # last record empty
    f.attrs['description'] = ('The Burstein & Heiles (1982) dust map.')
    f.close()
def equals(df1, df2, ignore_order=set(), ignore_indices=set(), all_close=False, _return_reason=False):
    '''Get whether 2 data frames are equal.

    ``NaN`` is considered equal to ``NaN`` and `None`.

    Parameters
    ----------
    df1 : ~pandas.DataFrame
        Data frame to compare.
    df2 : ~pandas.DataFrame
        Data frame to compare.
    ignore_order : ~typing.Set[int]
        Axi in which to ignore order.
    ignore_indices : ~typing.Set[int]
        Axi of which to ignore the index. E.g. ``{1}`` allows differences in
        ``df.columns.name`` and ``df.columns.equals(df2.columns)``.
    all_close : bool
        If `False`, values must match exactly, if `True`, floats are compared
        as if compared with `numpy.isclose`.
    _return_reason : bool
        Internal. If `True`, return ``(equal, reason)`` where ``reason`` is
        `None` when equal, else a short explanation of the difference.

    Returns
    -------
    bool
        Whether the data frames are equal (after ignoring according to the
        parameters).

    Notes
    -----
    All values (including those of indices) must be copyable and ``__eq__``
    must be such that a copy equals its original. Values needn't be orderable
    or hashable. By consequence, this is not an efficient function, but it is
    flexible.
    '''
    outcome = _equals(df1, df2, ignore_order, ignore_indices, all_close)
    # `_equals` yields a (is_equal, reason) tuple; expose the reason only
    # when explicitly requested.
    return outcome if _return_reason else outcome[0]
def get_cache_context(self):
    '''Retrieve a context cache from disk'''
    # Deserialize the cached payload, then decode it into native types.
    with salt.utils.files.fopen(self.cache_path, 'rb') as handle:
        payload = self.serial.load(handle)
    return salt.utils.data.decode(payload)
def check_error(res, error_enum):
    """Raise if the result has an error, otherwise return the result."""
    # Success path: pass the result straight through.
    if not res.HasField("error"):
        return res
    # Build a descriptive message from the enum metadata before raising.
    enum_name = error_enum.DESCRIPTOR.full_name
    error_name = error_enum.Name(res.error)
    details = getattr(res, "error_details", "<none>")
    raise RequestError("%s.%s: '%s'" % (enum_name, error_name, details), res)
def get_changes(self, factory_name, global_factory=False, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    """Get the changes this refactoring makes.

    `factory_name` indicates the name of the factory function to be added.
    If `global_factory` is `True` the factory will be global, otherwise a
    static method is added to the class.

    `resources` can be a list of `rope.base.resource.File`\\s that this
    refactoring should be applied on; if `None` all python files in the
    project are searched.
    """
    # Default to every python file in the project.
    if resources is None:
        resources = self.project.get_python_files()
    changes = ChangeSet('Introduce factory method <%s>' % factory_name)
    jobs = task_handle.create_jobset('Collecting Changes', len(resources))
    self._change_module(resources, changes, factory_name, global_factory, jobs)
    return changes
def map_values2(self, func):
    """Map each (key, value) pair through ``func``, keeping the keys.

    :param func:
    :type func: (K, T) -> U
    :rtype: TDict[U]

    Usage:

        >>> TDict(k1=1, k2=2, k3=3).map_values2(lambda k, v: f'{k} -> {v*2}') == {
        ...     "k1": "k1 -> 2",
        ...     "k2": "k2 -> 4",
        ...     "k3": "k3 -> 6"
        ... }
        True
    """
    mapped = {key: func(key, value) for key, value in self.items()}
    return TDict(mapped)
def _parse_eval_args(self, *args, **kwargs):
    """NAME:

       _parse_eval_args

    PURPOSE:

       Internal function to parse the arguments given for an
       action/frequency/angle evaluation

    INPUT:

    OUTPUT:

    HISTORY:

       2010-07-11 - Written - Bovy (NYU)
    """
    # Phase-space coordinates may be given directly (3, 5 or 6 scalars)
    # or as an Orbit-like object (possibly with an evaluation time).
    if len(args) == 3:  # R,vR.vT
        R, vR, vT = args
        self._eval_R = R
        self._eval_vR = vR
        self._eval_vT = vT
        # Planar input: assume the midplane.
        self._eval_z = 0.
        self._eval_vz = 0.
    elif len(args) == 5:  # R,vR.vT, z, vz
        R, vR, vT, z, vz = args
        self._eval_R = R
        self._eval_vR = vR
        self._eval_vT = vT
        self._eval_z = z
        self._eval_vz = vz
    elif len(args) == 6:  # R,vR.vT, z, vz, phi
        R, vR, vT, z, vz, phi = args
        self._eval_R = R
        self._eval_vR = vR
        self._eval_vT = vT
        self._eval_z = z
        self._eval_vz = vz
        self._eval_phi = phi
    else:
        # Orbit-like input: extract the phase-space vector `vxvv`.
        if not kwargs.get('_noOrbUnitsCheck', False):
            self._check_consistent_units_orbitInput(args[0])
        if len(args) == 2:
            # (orbit, time): evaluate the orbit at the given time first.
            vxvv = args[0](args[1])._orb.vxvv
        else:
            try:
                vxvv = args[0]._orb.vxvv
            except AttributeError:  # if we're given an OrbitTop instance
                vxvv = args[0].vxvv
        self._eval_R = vxvv[0]
        self._eval_vR = vxvv[1]
        self._eval_vT = vxvv[2]
        # The meaning of vxvv[3:] depends on its length:
        # 6 -> (..., z, vz, phi); 5 -> (..., z, vz); 4 -> planar (..., phi).
        if len(vxvv) > 4:
            self._eval_z = vxvv[3]
            self._eval_vz = vxvv[4]
            if len(vxvv) > 5:
                self._eval_phi = vxvv[5]
        elif len(vxvv) > 3:
            self._eval_phi = vxvv[3]
            self._eval_z = 0.
            self._eval_vz = 0.
        else:
            self._eval_z = 0.
            self._eval_vz = 0.
    if hasattr(self, '_eval_z'):  # calculate the polar angle
        if self._eval_z == 0.:
            self._eval_theta = m.pi / 2.
        else:
            # NOTE(review): uses atan, so theta is negative for z < 0 —
            # confirm this matches the intended polar-angle convention.
            self._eval_theta = m.atan(self._eval_R / self._eval_z)
    return None
def print(self, tag=None, name=None):
    """Prints each tuple to stdout flushing after each tuple.

    If `tag` is not `None` then each tuple has "tag: " prepended
    to it before printing.

    Args:
        tag: A tag to prepend to each tuple.
        name(str): Name of the resulting stream.
            When `None` defaults to a generated name.

    Returns:
        streamsx.topology.topology.Sink: Stream termination.

    .. versionadded:: 1.6.1 `tag`, `name` parameters.
    .. versionchanged:: 1.7
        Now returns a :py:class:`Sink` instance.
    """
    sink_name = 'print' if name is None else name
    if tag is None:
        emit = streamsx.topology.functions.print_flush
    else:
        # Prepend the tag to the textual form of each tuple.
        prefix = str(tag) + ': '
        emit = lambda v: streamsx.topology.functions.print_flush(prefix + str(v))
    sink = self.for_each(emit, name=sink_name)
    sink._op().sl = _SourceLocation(_source_info(), 'print')
    return sink
def allocate(self):
    """Returns an efficient portfolio allocation for the given risk index."""
    # Historic price data restricted to the supported coins.
    df = self.manager.get_historic_data()[self.SUPPORTED_COINS]
    # ==== Calculate the daily changes ====#
    change_columns = []
    for column in df:
        if column in self.SUPPORTED_COINS:
            change_column = '{}_change'.format(column)
            # Relative day-over-day change, aligned via a -1 shift.
            values = pd.Series((df[column].shift(-1) - df[column]) / -df[column].shift(-1)).values
            df[change_column] = values
            change_columns.append(change_column)
    # print(df.head())
    # print(df.tail())
    # ==== Variances and returns ====#
    columns = change_columns
    # NOTE: `risks` is not used, but may be used in the future
    risks = df[columns].apply(np.nanvar, axis=0)
    # print('\nVariance:\n{}\n'.format(risks))
    returns = df[columns].apply(np.nanmean, axis=0)
    # print('\nExpected returns:\n{}\n'.format(returns))
    # ==== Calculate risk and expected return ====#
    cov_matrix = df[columns].cov()
    # NOTE: The diagonal variances weren't calculated correctly, so here is a fix.
    cov_matrix.values[[np.arange(len(self.SUPPORTED_COINS))] * 2] = df[columns].apply(np.nanvar, axis=0)
    # Start from an equal-weight portfolio, shaped as a column vector.
    weights = np.array([1 / len(self.SUPPORTED_COINS)] * len(self.SUPPORTED_COINS)).reshape(len(self.SUPPORTED_COINS), 1)
    # ==== Calculate portfolio with the minimum risk ====#
    min_risk = self.get_min_risk(weights, cov_matrix)
    min_return = np.dot(min_risk, returns.values)
    # ==== Calculate portfolio with the maximum return ====#
    max_return = self.get_max_return(weights, returns)
    # ==== Calculate efficient frontier ====#
    # Sample 6 points between the minimum-risk and maximum-return portfolios.
    frontier = self.efficient_frontier(returns, cov_matrix, min_return, max_return, 6)
    return frontier
def attributes(self, *args, **kwargs):
    """Add one or more attributes to the :class:`xml4h.nodes.Element` node
    represented by this Builder.

    :return: the current Builder.

    Delegates to :meth:`xml4h.nodes.Element.set_attributes`.
    """
    # Forward everything to the wrapped element, then return the builder
    # itself so calls can be chained fluently.
    element = self._element
    element.set_attributes(*args, **kwargs)
    return self
def get_named_type(type_):  # noqa: F811
    """Unwrap possible wrapping type"""
    if not type_:
        return None
    # Peel off wrapper layers (e.g. NonNull / List) until a named type remains.
    inner = type_
    while is_wrapping_type(inner):
        inner = cast(GraphQLWrappingType, inner).of_type
    return cast(GraphQLNamedType, inner)
def get_links(self, request=None):
    """Return a dictionary containing all the links that should be
    included in the API schema."""
    links = LinkNode()
    # First pass: materialize (path, method, view) triples, dropping any
    # view explicitly excluded from the schema.
    included_paths = []
    endpoints = []
    for path, method, callback in self.endpoints:
        view = self.create_view(callback, method, request)
        if getattr(view, 'exclude_from_schema', False):
            continue
        coerced = self.coerce_path(path, method, view)
        included_paths.append(coerced)
        endpoints.append((coerced, method, view))
    # Only generate the path prefix for paths that will be included.
    if not included_paths:
        return None
    prefix = self.determine_path_prefix(included_paths)
    # Second pass: build a link per permitted endpoint and slot it into
    # the link tree keyed by the prefix-stripped subpath.
    for path, method, view in endpoints:
        if not self.has_view_permissions(path, method, view):
            continue
        link = self.get_link(path, method, view, version=getattr(request, 'version', None))
        subpath = path[len(prefix):]
        keys = self.get_keys(subpath, method, view)
        try:
            insert_into(links, keys, link)
        except Exception:
            # Best-effort behaviour: skip links that cannot be placed.
            continue
    return links
def _request_raw_content ( self , url , timeout ) :
"""Send the request to get raw content .""" | request = Request ( url )
if self . referer is not None :
request . add_header ( 'Referer' , self . referer )
raw_xml = self . _call_geocoder ( request , timeout = timeout , deserializer = None )
return raw_xml |
def findall(self, title=None):
    """Return a list of worksheets with the given title.

    Args:
        title (str): title/name of the worksheets to return, or ``None``
            for all

    Returns:
        list: list of contained worksheet instances (possibly empty)
    """
    # No filter: return a shallow copy of every contained worksheet.
    if title is None:
        return list(self._sheets)
    # Unknown titles simply yield an empty list.
    return list(self._titles.get(title, []))
def active_brokers(self):
    """Return set of brokers that are not inactive or decommissioned."""
    active = set()
    for broker in self._brokers:
        # Skip brokers that are out of service in either way.
        if broker.inactive or broker.decommissioned:
            continue
        active.add(broker)
    return active
def _convert_snapshots ( topo_dir ) :
"""Convert 1 . x snapshot to the new format""" | old_snapshots_dir = os . path . join ( topo_dir , "project-files" , "snapshots" )
if os . path . exists ( old_snapshots_dir ) :
new_snapshots_dir = os . path . join ( topo_dir , "snapshots" )
os . makedirs ( new_snapshots_dir )
for snapshot in os . listdir ( old_snapshots_dir ) :
snapshot_dir = os . path . join ( old_snapshots_dir , snapshot )
if os . path . isdir ( snapshot_dir ) :
is_gns3_topo = False
# In . gns3project fileformat the . gns3 should be name project . gns3
for file in os . listdir ( snapshot_dir ) :
if file . endswith ( ".gns3" ) :
shutil . move ( os . path . join ( snapshot_dir , file ) , os . path . join ( snapshot_dir , "project.gns3" ) )
is_gns3_topo = True
if is_gns3_topo :
snapshot_arc = os . path . join ( new_snapshots_dir , snapshot + ".gns3project" )
with zipfile . ZipFile ( snapshot_arc , 'w' , allowZip64 = True ) as myzip :
for root , dirs , files in os . walk ( snapshot_dir ) :
for file in files :
myzip . write ( os . path . join ( root , file ) , os . path . relpath ( os . path . join ( root , file ) , snapshot_dir ) , compress_type = zipfile . ZIP_DEFLATED )
shutil . rmtree ( old_snapshots_dir ) |
def check_tx_ok(result):
    """Checks if function :meth:`UcanServer.write_can_msg` successfully wrote CAN message(s).

    While using :meth:`UcanServer.write_can_msg_ex` the number of sent CAN messages can be less
    than the number of CAN messages which should be sent.

    :param ReturnCode result: Error code of the function.
    :return: True if CAN message(s) was (were) written successfully, otherwise False.
    :rtype: bool

    ..: seealso:: const:`ReturnCode.WARN_TXLIMIT`
    """
    value = result.value
    # Full success, or a warning code (partial transmission) above WARNING.
    if value == ReturnCode.SUCCESSFUL:
        return True
    return value > ReturnCode.WARNING
def _load_dataset(self, images, labels, emotion_index_map):
    """Loads Dataset object with images, labels, and other data.

    :param images: numpy array of image data
    :param labels: numpy array of one-hot vector labels
    :param emotion_index_map: map linking string/integer emotion class to
        integer index used in labels vectors
    :return: Dataset object containing image and label data.
    """
    # Stratified split keeps the class balance identical in train and test;
    # the fixed random_state makes the split reproducible.
    split = train_test_split(images, labels,
                             test_size=self.validation_split,
                             random_state=42, stratify=labels)
    train_images, test_images, train_labels, test_labels = split
    return Dataset(train_images, test_images, train_labels, test_labels,
                   emotion_index_map, self.time_delay)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.