signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def register_default_processors(cls, frontend_editing=None):
    """Register our default request processors for the out-of-the-box Page
    experience.

    Since FeinCMS 1.11 frontend editing was removed from core.

    :param frontend_editing: truthy to also register the frontend-editing
        request and response processors under the 'frontend_editing' key
    """
    super(Page, cls).register_default_processors()
    if frontend_editing:
        cls.register_request_processor(edit_processors.frontendediting_request_processor, key='frontend_editing')
        cls.register_response_processor(edit_processors.frontendediting_response_processor, key='frontend_editing')
|
def Email(self, From, To, Cc=None, Bcc=None, Subject=None, Tag=None, HtmlBody=None, TextBody=None, Metadata=None, ReplyTo=None, Headers=None, TrackOpens=None, TrackLinks="None", Attachments=None,):
    """Constructs :py:class:`Email` instance bound to this manager.

    :return: :py:class:`Email`
    """
    # NOTE(review): TrackLinks defaults to the *string* "None", not the None
    # singleton — presumably the upstream API expects this literal; confirm.
    return Email(manager=self, From=From, To=To, Cc=Cc, Bcc=Bcc, Subject=Subject, Tag=Tag, HtmlBody=HtmlBody, TextBody=TextBody, Metadata=Metadata, ReplyTo=ReplyTo, Headers=Headers, TrackOpens=TrackOpens, TrackLinks=TrackLinks, Attachments=Attachments,)
|
def _R2deriv ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ R2deriv
PURPOSE :
evaluate the second radial derivative for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
the second radial derivative
HISTORY :
2015-06-15 - Written - Bovy ( IAS )"""
|
return ( self . _b2 - 2. * R ** 2. + z ** 2. ) * ( R ** 2. + z ** 2. + self . _b2 ) ** - 2.5
|
def check_whiteboard_status(self, whiteboard):
    """Extract stockwell text from a bug's whiteboard status and report
    whether it matches one of the recognized keywords; returns a boolean."""
    match = re.search(r'\[stockwell (.+?)\]', whiteboard)
    if match is None:
        return False
    keyword = match.group(1).split(':')[0]
    return keyword in ('fixed', 'disable-recommended', 'infra', 'disabled')
|
def xlim(self, low, high):
    """Set x-axis limits.

    Parameters
    low : number
    high : number

    Returns
    Chart"""
    axis = self.chart['xAxis'][0]
    axis['min'] = low
    axis['max'] = high
    return self
|
def report_privilege_information():
    """Report all privilege information assigned to the current process.

    Prints a summary count line followed by one line per privilege, as
    returned by ``get_privilege_information()``.
    """
    privileges = get_privilege_information()
    # NOTE(review): if ``privileges`` is a plain list, ``privileges.count`` is
    # the bound method object (not a number) and would be printed as such —
    # confirm get_privilege_information() returns an object whose ``count``
    # is a value.
    print("found {0} privileges".format(privileges.count))
    # Print each privilege; tuple() forces the lazy map to execute.
    tuple(map(print, privileges))
|
def get_raw(self, name=None):
    '''Shortcut for getting a :class:`~statsd.raw.Raw` instance.

    :keyword name: See :func:`~statsd.client.Client.get_client`
    :type name: str
    '''
    return self.get_client(name=name, class_=statsd.Raw)
|
def create(input_block: ModelFactory, rnn_type: str, output_dim: int,
           rnn_layers: typing.List[int], rnn_dropout: float = 0.0,
           bidirectional: bool = False,
           linear_layers: typing.List[int] = None,
           linear_dropout: float = 0.0):
    """Vel factory function"""
    # Avoid the mutable-default pitfall: substitute an empty list lazily.
    resolved_linear_layers = [] if linear_layers is None else linear_layers

    def instantiate(**_):
        # Build the classification model lazily, instantiating the input
        # block only when the factory is actually invoked.
        return MultilayerRnnSequenceClassification(
            input_block=input_block.instantiate(),
            rnn_type=rnn_type,
            output_dim=output_dim,
            rnn_layers=rnn_layers,
            rnn_dropout=rnn_dropout,
            bidirectional=bidirectional,
            linear_layers=resolved_linear_layers,
            linear_dropout=linear_dropout,
        )

    return ModelFactory.generic(instantiate)
|
def shortlex(start, other, excludestart=False):
    """Yield all unions of start with other in shortlex order.

    >>> ['{:03b}'.format(s) for s in shortlex(0, [0b100, 0b010, 0b001])]
    ['000', '100', '010', '001', '110', '101', '011', '111']
    >>> assert list(shortlex(set(), [{1}, {2}], excludestart=True)) == [{1}, {2}, {1, 2}]
    """
    if not excludestart:
        yield start
    # Breadth-first expansion: each queue entry pairs an accumulated union
    # with the suffix of elements still eligible to join it.
    pending = collections.deque([(start, other)])
    while pending:
        base, rest = pending.popleft()
        for index, item in enumerate(rest):
            combined = base | item
            yield combined
            remainder = rest[index + 1:]
            if remainder:
                pending.append((combined, remainder))
|
def _strftime(expr, date_format):
    """Return formatted strings specified by date_format, which supports the
    same string format as the python standard library. Details of the string
    format can be found in the python string format doc.

    :param expr: expression to format
    :param date_format: date format string (e.g. "%Y-%m-%d")
    :type date_format: str
    :return: string-typed expression produced by the Strftime operation
    """
    return datetime_op(expr, Strftime, output_type=types.string, _date_format=date_format)
|
def json_output(cls, cs, score_dict, output_filename, ds_loc, limit, output_type='json'):
    '''Generates JSON output for the compliance score(s).

    @param cs Compliance Checker Suite
    @param score_dict Dict mapping dataset -> {checker: (groups, errors)}
    @param output_filename The file path to output to ('-' writes to stdout)
    @param ds_loc List of source datasets
    @param limit The degree of strictness, 1 being the strictest,
        and going up from there.
    @param output_type Either 'json' or 'json_new'. json_new is the new
        json output format that supports multiple datasets
    '''
    results = {}
    # json output keys out at the top level by checker ('json') or by
    # dataset ('json_new').
    if len(score_dict) > 1 and output_type != 'json_new':
        raise ValueError("output_type must be set to 'json_new' if outputting multiple datasets to a single json file or stdout")
    if output_type == 'json':
        for ds, score_groups in six.iteritems(score_dict):
            for checker, rpair in six.iteritems(score_groups):
                groups, errors = rpair
                results[checker] = cs.dict_output(checker, groups, ds, limit,)
    elif output_type == 'json_new':
        for ds, score_groups in six.iteritems(score_dict):
            for checker, rpair in six.iteritems(score_groups):
                groups, errors = rpair
                results[ds] = {}
                results[ds][checker] = cs.dict_output(checker, groups, ds, limit)
    json_results = json.dumps(results, indent=2, ensure_ascii=False)
    if output_filename == '-':
        print(json_results)
    else:
        with io.open(output_filename, 'w', encoding='utf8') as f:
            f.write(json_results)
    # NOTE(review): ``groups`` is the leftover loop variable from the final
    # iteration; if score_dict is empty this raises NameError. Confirm
    # callers actually rely on this return value.
    return groups
|
def getall(self, key, type=None):
    """Return a list of values for the given key.

    If `type` is not None, all values will be converted by calling `type`
    with the value as argument. If type() raises `ValueError`, the value
    will not appear in the result list.
    """
    collected = []
    for item_key, item_value in self._items:
        if item_key != key:
            continue
        if type is None:
            collected.append(item_value)
            continue
        try:
            collected.append(type(item_value))
        except ValueError:
            # Unconvertible values are silently dropped by contract.
            pass
    return collected
|
def start(self):
    """Download files using wget or other downloader.
    Optional curl, aria2c and httpie.
    """
    dwn_count = 1
    self._directory_prefix()
    for dwn in self.url:
        # get file name from url and fix passing char '+'
        self.file_name = dwn.split("/")[-1].replace("%2B", "+")
        if dwn.startswith("file:///"):
            # Local source: strip the scheme prefix/version suffix and build
            # a tarball instead of fetching over the network.
            source_dir = dwn[7:-7].replace(slack_ver(), "")
            self._make_tarfile(self.file_name, source_dir)
        self._check_certificate()
        print("\n[{0}/{1}][ {2}Download{3} ] --> {4}\n".format(dwn_count, len(self.url), self.meta.color["GREEN"], self.meta.color["ENDC"], self.file_name))
        # Dispatch to the configured downloader; each tool takes the output
        # location in a slightly different form.
        if self.downder in ["wget"]:
            subprocess.call("{0} {1} {2}{3} {4}".format(self.downder, self.downder_options, self.dir_prefix, self.path, dwn), shell=True)
        if self.downder in ["aria2c"]:
            # aria2c wants the directory without a trailing separator.
            subprocess.call("{0} {1} {2}{3} {4}".format(self.downder, self.downder_options, self.dir_prefix, self.path[:-1], dwn), shell=True)
        elif self.downder in ["curl", "http"]:
            subprocess.call("{0} {1} {2}{3} {4}".format(self.downder, self.downder_options, self.path, self.file_name, dwn), shell=True)
        self._check_if_downloaded()
        dwn_count += 1
|
def write_html(self, html_dir='/tmp', include_osd=False, osd_width=500, osd_height=500):
    """Write HTML test page using OpenSeadragon for the tiles generated.

    Assumes that the generate(..) method has already been called to set up
    identifier etc.

    Parameters:
        html_dir - output directory for HTML files, will be created if it
            does not already exist
        include_osd - true to include OpenSeadragon code
        osd_width - width of OpenSeadragon pane in pixels
        osd_height - height of OpenSeadragon pane in pixels

    Raises:
        IIIFStaticError - if html_dir exists but is a file
    """
    osd_config = self.get_osd_config(self.osd_version)
    osd_base = osd_config['base']
    osd_dir = osd_config['dir']  # relative to base
    osd_js = os.path.join(osd_dir, osd_config['js'])
    osd_images = os.path.join(osd_dir, osd_config['images'])
    if os.path.isdir(html_dir):
        # Exists, fine
        pass
    elif os.path.isfile(html_dir):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % html_dir)
    else:
        os.makedirs(html_dir)
    self.logger.info("Writing HTML to %s" % (html_dir))
    with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
        template = f.read()
    outfile = self.identifier + '.html'
    outpath = os.path.join(html_dir, outfile)
    with open(outpath, 'w') as f:
        info_json_uri = '/'.join([self.identifier, 'info.json'])
        if self.prefix:
            info_json_uri = '/'.join([self.prefix, info_json_uri])
        # BUG FIX: osd_height/osd_width were previously swapped when building
        # the substitution dict (osd_height=osd_width, osd_width=osd_height),
        # transposing the OpenSeadragon pane dimensions.
        d = dict(identifier=self.identifier,
                 api_version=self.api_version,
                 osd_version=self.osd_version,
                 osd_uri=osd_js,
                 osd_images_prefix=osd_images,
                 osd_height=osd_height,
                 osd_width=osd_width,
                 info_json_uri=info_json_uri)
        f.write(Template(template).safe_substitute(d))
    self.logger.info("%s / %s" % (html_dir, outfile))
    # Do we want to copy OSD in there too? If so, do it only if
    # we haven't already.
    if include_osd:
        if self.copied_osd:
            self.logger.info("OpenSeadragon already copied")
        else:
            # Make directory, copy JavaScript and icons (from osd_images).
            osd_path = os.path.join(html_dir, osd_dir)
            if not os.path.isdir(osd_path):
                os.makedirs(osd_path)
            shutil.copyfile(os.path.join(osd_base, osd_js),
                            os.path.join(html_dir, osd_js))
            self.logger.info("%s / %s" % (html_dir, osd_js))
            osd_images_path = os.path.join(html_dir, osd_images)
            if os.path.isdir(osd_images_path):
                self.logger.warning(
                    "OpenSeadragon images directory (%s) already exists, skipping"
                    % osd_images_path)
            else:
                shutil.copytree(os.path.join(osd_base, osd_images),
                                osd_images_path)
                self.logger.info("%s / %s/*" % (html_dir, osd_images))
            self.copied_osd = True
|
def get_next_types(self, n=None):
    """Gets the next set of ``Types`` in this list.

    The specified amount must be less than or equal to the return
    from ``available()``.

    arg:    n (cardinal): the number of ``Type`` elements requested
            which must be less than or equal to ``available()``
    return: (osid.type.Type) - an array of ``Type`` elements. The
            length of the array is less than or equal to the number
            specified.
    raise:  IllegalState - no more elements available in this list
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*
    """
    if n > self.available():
        # !!! This is not quite as specified (see method docs) !!!
        raise IllegalState('not enough elements available in this list')
    next_list = []
    for _ in range(n):
        try:
            next_list.append(next(self))
        except Exception:
            # BUG FIX: was a bare ``except:`` which also swallowed
            # SystemExit / KeyboardInterrupt. Any failure while advancing
            # the iterator is reported as OperationFailed per the contract.
            raise OperationFailed()
    return next_list
|
def load_array(path, group=None):
    """Load an Array from a .hdf, .txt or .npy file. The
    default data types will be double precision floating point.

    Parameters
    path : string
        source file path. Must end with .npy, .txt or .hdf.
    group : string
        Additional name for internal storage use. Ex. hdf storage uses
        this as the key value.

    Raises
    ValueError
        If path does not end in .npy, .hdf or .txt, or if the loaded data
        has more than 2 dimensions.
    """
    ext = _os.path.splitext(path)[1]
    if ext == '.npy':
        data = _numpy.load(path)
    elif ext == '.txt':
        data = _numpy.loadtxt(path)
    elif ext == '.hdf':
        key = 'data' if group is None else group
        # NOTE(review): the h5py.File handle is never closed; the returned
        # Array wraps the still-open dataset — confirm this lifetime is
        # intentional.
        return Array(h5py.File(path)[key])
    else:
        raise ValueError('Path must end with .npy, .hdf, or .txt')
    # 1-D data is treated as real-valued; 2-D data is interpreted as
    # (real, imaginary) column pairs.
    if data.ndim == 1:
        return Array(data)
    elif data.ndim == 2:
        return Array(data[:, 0] + 1j * data[:, 1])
    else:
        raise ValueError('File has %s dimensions, cannot convert to Array, \
            must be 1 (real) or 2 (complex)' % data.ndim)
|
def properties(self):
    """Morphosemantic property mapping.

    Unlike :attr:`sortinfo`, this does not include `cvarsort`.
    """
    mapping = dict(self.sortinfo)
    # pop with a default deletes the key only when present, mirroring the
    # conditional ``del``.
    mapping.pop(CVARSORT, None)
    return mapping
|
def processInput(self, dataAveraging=False, windowSize=None):
    """Prepare the dependent/independent input arrays for processing.

    Converts ``self.dependentVarInput`` / ``self.independentVarInput`` to
    float64 arrays, sorts both by the independent variable and, if
    ``dataAveraging`` is truthy, replaces the dependent variable with a
    smoothed version and trims the independent variable to match.

    :param dataAveraging: averaging type forwarded to ``averagingData``
        (falsy to skip smoothing)
    :param windowSize: window size forwarded to ``averagingData``
    """
    self.dependentVar = numpy.array(self.dependentVarInput, dtype=numpy.float64)
    self.independentVar = numpy.array(self.independentVarInput, dtype=numpy.float64)
    # Sort both arrays by the independent variable.
    sortMask = self.independentVar.argsort()
    self.dependentVar = self.dependentVar[sortMask]
    self.independentVar = self.independentVar[sortMask]
    if dataAveraging:
        averagedData = averagingData(self.dependentVar, windowSize=windowSize,
                                     averagingType=dataAveraging)
        averagedData = numpy.array(averagedData, dtype=numpy.float64)
        # Averaging shortens the series; trim the independent variable
        # (roughly) symmetrically so both arrays stay aligned.
        # BUG FIX: the trim amounts must be ints (numpy.floor returns a
        # float, which is not a valid slice index), and a zero-length trim
        # at the high end must not be expressed as ``[-0:]``/``[:-0]``,
        # which would empty the array.
        missing = self.independentVar.size - averagedData.size
        missingNumHigh = missing // 2
        missingNumLow = missing - missingNumHigh
        self.dependentVar = averagedData
        self.independentVar = self.independentVar[
            missingNumLow:self.independentVar.size - missingNumHigh]
|
def input_ratio(self, name, choices, labels, multi_line=False):
    """Render a Jinja-style radio-button group template.

    Emits a ``{% for %}`` loop over (choice, label) pairs that marks the
    radio whose value equals the template variable named ``name`` as
    checked.

    :param name: template variable holding the selected value
    :param choices: radio input values
    :param labels: display labels, one per choice
    :param multi_line: append ``<br>`` after each radio when True
    :raises ValueError: if choices and labels differ in length
    """
    if len(choices) != len(labels):
        raise ValueError("The size of 'choices' and 'labels' doesn't match!")
    pairs = list(zip(choices, labels))
    line_break = "<br>" if multi_line else ""
    indent = self.tab
    rendered = [
        '{%% for value, label in %s %%}' % repr(pairs),
        indent + '{%% if %s == value %%}' % name,
        indent * 2 + '{% set checked = "checked" %}',
        indent + '{% else %}',
        indent * 2 + '{% set checked = "" %}',
        indent + '{% endif %}',
        indent + '<input type="radio" name="%s" value="{{value}}" {{checked}} /> {{label}} %s' % (name, line_break),
        '{% endfor %}',
    ]
    return "\n".join(rendered)
|
def copy(self):
    """Return a new :class:`FontOptions` with the same values."""
    klass = type(self)
    # Bypass __init__ and initialize directly from a copied cairo pointer.
    duplicate = object.__new__(klass)
    klass._init_pointer(duplicate, cairo.cairo_font_options_copy(self._pointer))
    return duplicate
|
def _handleCtrlZ(self):
    """Handler for CTRL+Z keypresses.

    When an SMS is being typed, flushes the buffered text followed by the
    CTRL-Z character to the serial link, then resets the input state and
    redraws the prompt. No-op otherwise.
    """
    if self._typingSms:
        self.serial.write(''.join(self.inputBuffer))
        # NOTE(review): CTRL-Z presumably terminates SMS text entry on the
        # modem side — confirm against CTRL_Z_CHARACTER's definition.
        self.serial.write(self.CTRL_Z_CHARACTER)
        self._typingSms = False
        self.inputBuffer = []
        self.cursorPos = 0
        sys.stdout.write('\n')
        self._refreshInputPrompt()
|
def _parse_properties ( self , properties ) :
"""Make list of properties into 2 things :
(1 ) dictionary of { ' aliased - field ' : 1 , . . . } for a mongodb query eg . { ' ' }
(2 ) dictionary , keyed by aliased field , for display"""
|
props = { }
# TODO : clean up prop _ dict ?
prop_dict = OrderedDict ( )
# We use a dict instead of list to provide for a richer syntax
for p in properties :
if p in self . aliases :
if isinstance ( properties , dict ) :
props [ self . aliases [ p ] ] = properties [ p ]
else :
props [ self . aliases [ p ] ] = 1
prop_dict [ p ] = self . aliases [ p ] . split ( "." )
else :
if isinstance ( properties , dict ) :
props [ p ] = properties [ p ]
else :
props [ p ] = 1
prop_dict [ p ] = p . split ( "." )
# including a lower - level key after a higher level key e . g . :
# { ' output ' : 1 , ' output . crystal ' : 1 } instead of
# { ' output . crystal ' : 1 , ' output ' : 1}
# causes mongo to skip the other higher level keys .
# this is a ( sketchy ) workaround for that . Note this problem
# doesn ' t appear often in python2 because the dictionary ordering
# is more stable .
props = OrderedDict ( sorted ( props . items ( ) , reverse = True ) )
return props , prop_dict
|
def transformer(self, instance_count, instance_type, strategy=None, assemble_with=None, output_path=None, output_kms_key=None, accept=None, env=None, max_concurrent_transforms=None, max_payload=None, tags=None, role=None, model_server_workers=None, volume_kms_key=None):
    """Return a ``Transformer`` that uses a SageMaker Model based on the
    training job. It reuses the SageMaker Session and base job name used by
    the Estimator.

    Args:
        instance_count (int): Number of EC2 instances to use.
        instance_type (str): Type of EC2 instance to use, for example, 'ml.c4.xlarge'.
        strategy (str): The strategy used to decide how to batch records in a single request
            (default: None). Valid values: 'MULTI_RECORD' and 'SINGLE_RECORD'.
        assemble_with (str): How the output is assembled (default: None). Valid values: 'Line' or 'None'.
        output_path (str): S3 location for saving the transform result. If not specified,
            results are stored to a default bucket.
        output_kms_key (str): Optional. KMS key ID for encrypting the transform output (default: None).
        accept (str): The content type accepted by the endpoint deployed during the transform job.
        env (dict): Environment variables to be set for use during the transform job (default: None).
        max_concurrent_transforms (int): The maximum number of HTTP requests to be made to
            each individual transform container at one time.
        max_payload (int): Maximum size of the payload in a single HTTP request to the container in MB.
        tags (list[dict]): List of tags for labeling a transform job. If none specified, then
            the tags used for the training job are used for the transform job.
        role (str): The ``ExecutionRoleArn`` IAM Role ARN for the ``Model``, which is also used
            during transform jobs. If not specified, the role from the Estimator will be used.
        model_server_workers (int): Optional. The number of worker processes used by the inference
            server. If None, server will use one worker per vCPU.
        volume_kms_key (str): Optional. KMS key ID for encrypting the volume attached to the ML
            compute instance (default: None).
    """
    role = role or self.role
    if self.latest_training_job is not None:
        # A finished training job exists: create (and register) a Model from
        # it and transform with that model's environment.
        model = self.create_model(role=role, model_server_workers=model_server_workers)
        container_def = model.prepare_container_def(instance_type)
        model_name = model.name or name_from_image(container_def['Image'])
        vpc_config = model.vpc_config
        self.sagemaker_session.create_model(model_name, role, container_def, vpc_config)
        transform_env = model.env.copy()
        if env is not None:
            transform_env.update(env)
    else:
        # BUG FIX: the adjacent string literals previously concatenated
        # without a separating space ("...make sure" + "this estimator...").
        logging.warning('No finished training job found associated with this estimator. Please make sure '
                        'this estimator is only used for building workflow config')
        model_name = self._current_job_name
        transform_env = env or {}
    tags = tags or self.tags
    return Transformer(model_name, instance_count, instance_type, strategy=strategy, assemble_with=assemble_with, output_path=output_path, output_kms_key=output_kms_key, accept=accept, max_concurrent_transforms=max_concurrent_transforms, max_payload=max_payload, env=transform_env, tags=tags, base_transform_job_name=self.base_job_name, volume_kms_key=volume_kms_key, sagemaker_session=self.sagemaker_session)
|
def _datetime_to_tuple(dt_dict):
    """Convert datetime.datetime components from a dictionary to a tuple.

    Example:
        dt_dict = {'year': '2014', 'month': '07', 'day': '23',
                   'hour': '13', 'minute': '12', 'second': '45',
                   'microsecond': '321'}
        _datetime_to_tuple(dt_dict) -> (2014, 7, 23, 13, 12, 45, 321)
    """
    # Delegate the date and time halves to their dedicated converters and
    # splice both results into a single 7-tuple.
    return (*_date_to_tuple(dt_dict), *_time_to_tuple(dt_dict))
|
def plot(self, value=None, pixel=None):
    """Plot the ROI.

    :param value: pixel value(s) to plot; None draws the default ROI masks
        (ROI=1, annulus=0, target=2)
    :param pixel: pixel indices that ``value`` applies to; None applies
        ``value`` to ``self.pixels``
    """
    # DEPRECATED
    import ugali.utils.plotting
    map_roi = np.array(hp.UNSEEN * np.ones(hp.nside2npix(self.config.params['coords']['nside_pixel'])))
    if value is None:
        # map_roi[self.pixels] = ugali.utils.projector.angsep(self.lon, self.lat, self.centers_lon, self.centers_lat)
        map_roi[self.pixels] = 1
        map_roi[self.pixels_annulus] = 0
        map_roi[self.pixels_target] = 2
    elif value is not None and pixel is None:
        map_roi[self.pixels] = value
    elif value is not None and pixel is not None:
        map_roi[pixel] = value
    else:
        # NOTE(review): this branch is unreachable — the three conditions
        # above cover all combinations of value/pixel.
        logger.error("Can't parse input")
    ugali.utils.plotting.zoomedHealpixMap('Region of Interest', map_roi, self.lon, self.lat, self.config.params['coords']['roi_radius'])
|
def prune(self, vault_client):
    """Remove any mount point which is not actually defined in this
    context."""
    existing = getattr(vault_client, SecretBackend.list_fun)()['data'].items()
    for mount_name, _values in existing:
        mount_path = normalize_vault_path(mount_name)
        # Ignore system paths and cubbyhole.
        if mount_path.startswith('sys') or mount_path == 'cubbyhole':
            continue
        known = any(
            normalize_vault_path(resource.path) == mount_path
            for resource in self.mounts()
        )
        if not known:
            LOG.info("removed unknown mount %s", mount_path)
            getattr(vault_client, SecretBackend.unmount_fun)(mount_path)
|
def _delete_dynamic_versions(self):
    """Call the `delete` method of all dynamic versions of the current field
    found in the inventory, then clean the inventory itself.

    :raises ImplementationError: if called on a dynamic version rather than
        the base field
    """
    if self.dynamic_version_of:
        raise ImplementationError(u'"_delete_dynamic_versions" can only be ' u'executed on the base field')
    inventory = self._inventory
    for dynamic_part in inventory.smembers():
        name = self.get_name_for(dynamic_part)
        # create the field
        new_field = self._create_dynamic_version()
        new_field.name = name
        new_field._dynamic_part = dynamic_part
        # avoid useless computation by attaching directly instead of
        # resolving model/instance again
        new_field._attach_to_model(self._model)
        new_field._attach_to_instance(self._instance)
        # and delete its content
        new_field.delete()
    # Finally drop the inventory entry itself.
    inventory.delete()
|
def calculate_width_content(width, border=False, margin=None, margin_left=None, margin_right=None, padding=None, padding_left=None, padding_right=None):
    """Calculate actual widget content width based on given margins and
    paddings.

    Side-specific margins/paddings override the shared ``margin``/``padding``
    values; a border costs one column on each side. Returns None when no
    positive width remains.
    """
    # Side-specific values fall back to the shared setting.
    left_margin = margin if margin_left is None else margin_left
    right_margin = margin if margin_right is None else margin_right
    left_padding = padding if padding_left is None else padding_left
    right_padding = padding if padding_right is None else padding_right
    for amount in (left_margin, right_margin, left_padding, right_padding):
        if amount is not None:
            width -= int(amount)
    if border:
        width -= 2
    return width if width > 0 else None
|
def get_default_compartment(model):
    """Return what the default compartment should be set to.

    If some compounds have no compartment, a unique compartment name is
    returned to avoid collisions with existing compartments.
    """
    default_compartment = 'c'
    seen_compartments = set()
    for reaction in model.reactions:
        if reaction.equation is None:
            continue
        for compound, _ in reaction.equation.compounds:
            seen_compartments.add(compound.compartment)
    if None in seen_compartments and default_compartment in seen_compartments:
        # 'c' is already taken and some compounds lack a compartment; pick
        # the first free suffixed name.
        suffix = 1
        while '{}_{}'.format(default_compartment, suffix) in seen_compartments:
            suffix += 1
        default_compartment = '{}_{}'.format(default_compartment, suffix)
    if None in seen_compartments:
        logger.warning('Compound(s) found without compartment, default compartment is set to {}.'.format(default_compartment))
    return default_compartment
|
def engine_from_environment() -> Engine:
    """Returns an Engine instance configured using environment variables.

    If the environment variables are set, but incorrect, an authentication
    failure will occur when attempting to run jobs on the engine.

    Required Environment Variables:
        QUANTUM_ENGINE_PROJECT: The name of a google cloud project, with the
            quantum engine enabled, that you have access to.
        QUANTUM_ENGINE_API_KEY: An API key for the google cloud project named
            by QUANTUM_ENGINE_PROJECT.

    Raises:
        EnvironmentError: The environment variables are not set.
    """
    key = os.environ.get(ENV_API_KEY)
    if not key:
        message = 'Environment variable {} is not set.'.format(ENV_API_KEY)
        raise EnvironmentError(message)
    # The project id is optional; Engine handles a None default.
    project_id = os.environ.get(ENV_DEFAULT_PROJECT_ID)
    return Engine(api_key=key, default_project_id=project_id)
|
def update_rating(self, postid):
    '''Record a rating vote for a post; only a logged-in user may vote.

    :param postid: uid of the post being rated
    '''
    post_data = self.get_post_data()
    rating = float(post_data['rating'])
    postinfo = MPost.get_by_uid(postid)
    if postinfo and self.userinfo:
        MRating.update(postinfo.uid, self.userinfo.uid, rating=rating)
        self.update_post(postid)
    else:
        # NOTE(review): returns False on failure but implicitly None on
        # success — confirm callers don't rely on a truthy success value.
        return False
|
def GetEntries(self, parser_mediator, match=None, **unused_kwargs):
    """Extracts relevant user timestamp entries.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        match (Optional[dict[str: object]]): keys extracted from PLIST_KEYS.
    """
    if 'name' not in match or 'uid' not in match:
        return
    account = match['name'][0]
    uid = match['uid'][0]
    # Each password policy entry is an XML-encoded plist dict.
    for policy in match.get('passwordpolicyoptions', []):
        try:
            xml_policy = ElementTree.fromstring(policy)
        except (ElementTree.ParseError, LookupError) as exception:
            logger.error(('Unable to parse XML structure for an user policy, account: ' '{0:s} and uid: {1!s}, with error: {2!s}').format(account, uid, exception))
            continue
        for dict_elements in xml_policy.iterfind('dict'):
            key_values = [value.text for value in iter(dict_elements)]
            # Taking a list and converting it to a dict, using every other item
            # as the key and the other one as the value.
            policy_dict = dict(zip(key_values[0::2], key_values[1::2]))
            # '2001-01-01T00:00:00Z' is treated as a never-set sentinel.
            time_string = policy_dict.get('passwordLastSetTime', None)
            if time_string and time_string != '2001-01-01T00:00:00Z':
                try:
                    date_time = dfdatetime_time_elements.TimeElements()
                    date_time.CopyFromStringISO8601(time_string)
                except ValueError:
                    date_time = None
                    parser_mediator.ProduceExtractionWarning('unable to parse password last set time string: {0:s}'.format(time_string))
                shadow_hash_data = match.get('ShadowHashData', None)
                if date_time and isinstance(shadow_hash_data, (list, tuple)):
                    # Extract the hash password information.
                    # It is store in the attribute ShadowHasData which is
                    # a binary plist data; However biplist only extracts one
                    # level of binary plist, then it returns this information
                    # as a string.
                    # TODO: change this into a DataRange instead. For this we
                    # need the file offset and size of the ShadowHashData value data.
                    shadow_hash_data = shadow_hash_data[0]
                    # Wrap the embedded plist bytes in a fake file object so
                    # biplist can parse them.
                    resolver_context = context.Context()
                    fake_file = fake_file_io.FakeFile(resolver_context, shadow_hash_data)
                    shadow_hash_data_path_spec = fake_path_spec.FakePathSpec(location='ShadowHashData')
                    fake_file.open(path_spec=shadow_hash_data_path_spec)
                    try:
                        plist_file = biplist.readPlist(fake_file)
                    except biplist.InvalidPlistException:
                        plist_file = {}
                    salted_hash = plist_file.get('SALTED-SHA512-PBKDF2', None)
                    if salted_hash:
                        salt_hex_bytes = codecs.encode(salted_hash['salt'], 'hex')
                        salt_string = codecs.decode(salt_hex_bytes, 'ascii')
                        entropy_hex_bytes = codecs.encode(salted_hash['entropy'], 'hex')
                        entropy_string = codecs.decode(entropy_hex_bytes, 'ascii')
                        password_hash = '$ml${0:d}${1:s}${2:s}'.format(salted_hash['iterations'], salt_string, entropy_string)
                    else:
                        password_hash = 'N/A'
                    event_data = plist_event.PlistTimeEventData()
                    event_data.desc = ('Last time {0:s} ({1!s}) changed the password: {2!s}').format(account, uid, password_hash)
                    event_data.key = 'passwordLastSetTime'
                    event_data.root = self._ROOT
                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                    parser_mediator.ProduceEventWithEventData(event, event_data)
            time_string = policy_dict.get('lastLoginTimestamp', None)
            if time_string and time_string != '2001-01-01T00:00:00Z':
                try:
                    date_time = dfdatetime_time_elements.TimeElements()
                    date_time.CopyFromStringISO8601(time_string)
                except ValueError:
                    date_time = None
                    parser_mediator.ProduceExtractionWarning('unable to parse last login time string: {0:s}'.format(time_string))
                if date_time:
                    event_data = plist_event.PlistTimeEventData()
                    event_data.desc = 'Last login from {0:s} ({1!s})'.format(account, uid)
                    event_data.key = 'lastLoginTimestamp'
                    event_data.root = self._ROOT
                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                    parser_mediator.ProduceEventWithEventData(event, event_data)
            time_string = policy_dict.get('failedLoginTimestamp', None)
            if time_string and time_string != '2001-01-01T00:00:00Z':
                try:
                    date_time = dfdatetime_time_elements.TimeElements()
                    date_time.CopyFromStringISO8601(time_string)
                except ValueError:
                    date_time = None
                    parser_mediator.ProduceExtractionWarning('unable to parse failed login time string: {0:s}'.format(time_string))
                if date_time:
                    event_data = plist_event.PlistTimeEventData()
                    event_data.desc = ('Last failed login from {0:s} ({1!s}) ({2!s} times)').format(account, uid, policy_dict.get('failedLoginCount', 0))
                    event_data.key = 'failedLoginTimestamp'
                    event_data.root = self._ROOT
                    event = time_events.DateTimeValuesEvent(date_time, definitions.TIME_DESCRIPTION_WRITTEN)
                    parser_mediator.ProduceEventWithEventData(event, event_data)
|
def deployments(self) -> Union["Deployments", Dict[None, None]]:
    """Returns a ``Deployments`` object containing all the deployment data and
    contract factories of a ``Package``'s `contract_types`.

    Automatically filters deployments to only expose those available on the
    current ``Package.w3`` instance. Returns an empty dict if the manifest
    contains no deployments.

    .. code:: python

        package.deployments.get_instance("ContractType")
    """
    if not check_for_deployments(self.manifest):
        return {}
    all_blockchain_uris = self.manifest["deployments"].keys()
    # Only deployments on the chain the current w3 instance talks to.
    matching_uri = validate_single_matching_uri(all_blockchain_uris, self.w3)
    deployments = self.manifest["deployments"][matching_uri]
    all_contract_factories = {deployment_data["contract_type"]: self.get_contract_factory(deployment_data["contract_type"]) for deployment_data in deployments.values()}
    validate_deployments_tx_receipt(deployments, self.w3, allow_missing_data=True)
    linked_deployments = get_linked_deployments(deployments)
    if linked_deployments:
        for deployment_data in linked_deployments.values():
            # Every resolved link reference must match the bytecode actually
            # deployed on chain.
            on_chain_bytecode = self.w3.eth.getCode(to_canonical_address(deployment_data["address"]))
            unresolved_linked_refs = normalize_linked_references(deployment_data["runtime_bytecode"]["link_dependencies"])
            resolved_linked_refs = tuple(self._resolve_linked_references(link_ref, deployments) for link_ref in unresolved_linked_refs)
            for linked_ref in resolved_linked_refs:
                validate_linked_references(linked_ref, on_chain_bytecode)
    return Deployments(deployments, all_contract_factories, self.w3)
|
def add_mobile_gateway(self, gateway):
    """Add a mobile VPN gateway to this policy VPN.

    Example of adding or removing a mobile VPN gateway::

        policy_vpn = PolicyVPN('myvpn')
        policy_vpn.open()
        policy_vpn.add_mobile_gateway(ExternalGateway('extgw3'))
        for mobile_gateway in policy_vpn.mobile_gateway_node:
            if mobile_gateway.gateway == ExternalGateway('extgw3'):
                mobile_gateway.delete()
        policy_vpn.save()
        policy_vpn.close()

    :param Engine,ExternalGateway gateway: An external gateway, engine or
        href for the mobile gateway
    :raises PolicyCommandFailed: could not add gateway
    """
    try:
        # An Engine exposes its internal gateway href via ``vpn``
        href = gateway.vpn.internal_gateway.href
    except AttributeError:
        # ExternalGateway, element or plain href string
        href = element_resolver(gateway)
    self.make_request(
        PolicyCommandFailed,
        method='create',
        resource='mobile_gateway_node',
        json={'gateway': href, 'node_usage': 'mobile'})
|
def to_tree(self):
    """Build and return a TreeLib tree mirroring this container's nodes."""
    result = TreeLibTree()
    for item in self:
        result.create_node(item, item.node_id, parent=item.parent)
    return result
|
def registry_storage(cls):
    """Get registry storage

    :return: WTaskRegistryBase
    :raises ValueError: if ``__registry_storage__`` is unset
    :raises TypeError: if it is not a WTaskRegistryBase instance
    """
    storage = cls.__registry_storage__
    if storage is None:
        raise ValueError('__registry_storage__ must be defined')
    if not isinstance(storage, WTaskRegistryBase):
        raise TypeError(
            "Property '__registry_storage__' is invalid (must derived from WTaskRegistryBase)"
        )
    return storage
|
def add_doc(self, doc, index_update=True, label_guesser_update=True):
    """Add a document to the index"""
    # Lazily create the writer/updater the first time they are needed.
    if index_update and not self.index_writer:
        self.index_writer = self.index.writer()
    if label_guesser_update and not self.label_guesser_updater:
        self.label_guesser_updater = self.label_guesser.get_updater()
    logger.info("Indexing new doc: %s" % doc)
    if index_update:
        self._update_doc_in_index(self.index_writer, doc)
    if label_guesser_update:
        self.label_guesser_updater.add_doc(doc)
    # Register the document locally without clobbering an existing entry.
    self._docs_by_id.setdefault(doc.docid, doc)
|
def replace(text, old, new, count=None, strip=False):
    '''Replace an ``old`` subset of ``text`` with ``new``.

    ``old`` type may be either a string or regular expression.
    If ``strip``, remove all leading/trailing whitespace.
    If ``count``, replace the specified number of occurence, otherwise replace all.
    '''
    if is_string(old):
        # str.replace treats a negative count as "replace all"
        text = text.replace(old, new, -1 if count is None else count)
    else:
        # re.sub treats count 0 as "replace all"
        text = old.sub(new, text, 0 if count is None else count)
    if strip:
        # strip == True strips whitespace; any other truthy value is
        # passed through as the set of characters to strip
        text = text.strip(None if strip == True else strip)
    return text
|
def _remove_document(self, gh_user, doc_id, parent_sha, author, commit_msg=None):
    """Remove a document.

    Remove a document on the given branch and attribute the commit to author.

    Checks out (or creates) a work branch for ``gh_user``/``doc_id`` based on
    ``parent_sha``, deletes the document's file (the whole directory for
    'nexson' docs), and commits the deletion.

    :param gh_user: user name used when naming/selecting the work branch
    :param doc_id: identifier of the document to delete
    :param parent_sha: commit SHA the work branch should be based on
    :param author: git author string for the commit
    :param commit_msg: optional commit message; a default is built if None
    :return: dict with 'commit_sha' (HEAD after the commit), 'branch'
        (the work branch name) and 'prev_file_sha' (blob SHA of the
        deleted file, or None if the file did not exist)
    :raises NotImplementedError: for an unrecognized ``self.doc_type``
    """
    # _LOG.debug("@@@@@ GitActionBase._remove_document, doc_id={}".format(doc_id))
    doc_filepath = self.path_for_doc(doc_id)
    # _LOG.debug("@@@@@ GitActionBase._remove_document, doc_filepath={}".format(doc_filepath))
    branch = self.create_or_checkout_branch(gh_user, doc_id, parent_sha)
    prev_file_sha = None
    if commit_msg is None:
        msg = "Delete document '%s' via OpenTree API" % doc_id
    else:
        msg = commit_msg
    if os.path.exists(doc_filepath):
        # remember the blob SHA of the file we are about to delete
        prev_file_sha = self.get_blob_sha_for_file(doc_filepath)
        if self.doc_type == 'nexson':
            # delete the parent directory entirely
            doc_dir = os.path.split(doc_filepath)[0]
            # _LOG.debug("@@@@@ GitActionBase._remove_document, doc_dir={}".format(doc_dir))
            git(self.gitdir, self.gitwd, "rm", "-rf", doc_dir)
        elif self.doc_type in ('collection', 'favorites', 'amendment'):
            # delete just the target file
            git(self.gitdir, self.gitwd, "rm", doc_filepath)
        else:
            raise NotImplementedError("No deletion rules for doc_type '{}'".format(self.doc_type))
    # NOTE(review): the commit runs even when the file was already absent --
    # presumably intentional (records the API call); confirm with callers.
    git(self.gitdir, self.gitwd, "commit", author=author, message=msg)
    new_sha = git(self.gitdir, self.gitwd, "rev-parse", "HEAD").strip()
    return {'commit_sha': new_sha, 'branch': branch, 'prev_file_sha': prev_file_sha, }
|
def _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table):
    """Assert that all locations in MarkLocation blocks have registered and valid metadata."""
    # The two sets must coincide exactly:
    # - every MarkLocation's location must be registered, and
    # - every registered location must appear in some MarkLocation block.
    registered = {location for location, _ in query_metadata_table.registered_locations}
    encountered = {block.location for block in ir_blocks if isinstance(block, MarkLocation)}

    not_registered = encountered - registered
    if not_registered:
        raise AssertionError(u'IR blocks unexpectedly contain locations not registered in the '
                             u'QueryMetadataTable: {}'.format(not_registered))

    never_marked = registered - encountered
    if never_marked:
        raise AssertionError(u'QueryMetadataTable unexpectedly contains registered locations that '
                             u'never appear in the IR blocks: {}'.format(never_marked))
|
def _find_mime_parameters(tokenlist, value):
    """Do our best to find the parameters in an invalid MIME header"""
    # Consume everything up to the first ';', tokenizing as we go.
    while value:
        if value[0] == ';':
            break
        if value[0] in PHRASE_ENDS:
            tokenlist.append(ValueTerminal(value[0], 'misplaced-special'))
            value = value[1:]
        else:
            token, value = get_phrase(value)
            tokenlist.append(token)
    if not value:
        # ran out of input before any parameter separator
        return
    tokenlist.append(ValueTerminal(';', 'parameter-separator'))
    tokenlist.append(parse_mime_parameters(value[1:]))
|
def ddebug(msg, err=None):  # pragma: no cover
    """Write a debug line (pid, message, optional traceback) to the real stdout.

    err can be an instance of sys.exc_info() -- which is the latest
    traceback info.
    """
    import os
    formatted = ''.join(traceback.format_exception(*err)) if err else ''
    sys.__stdout__.write("({}) {} {}".format(os.getpid(), msg, formatted) + '\n')
    sys.__stdout__.flush()
|
def get_high_water_mark(self, mark_type, obstory_name=None):
    """Retrieves the high water mark for a given obstory, defaulting to the
    current installation ID.

    :param string mark_type:
        The type of high water mark to look up
    :param string obstory_name:
        The obstory ID to check for, or the default installation ID if
        not specified
    :return:
        A UTC datetime for the high water mark, or None if none was found.
    """
    name = self.obstory_name if obstory_name is None else obstory_name
    obstory = self.get_obstory_from_name(name)
    key_id = self.get_hwm_key_id(mark_type)
    self.con.execute(
        'SELECT time FROM archive_highWaterMarks WHERE markType=%s AND observatoryId=%s',
        (key_id, obstory['uid']))
    rows = self.con.fetchall()
    if not rows:
        return None
    return rows[0]['time']
|
def xslt_transformation(xml, template):
    """Transform `xml` using XSLT `template`.

    Args:
        xml (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.
        template (str): Filename or XML string. Don't use ``\\n`` in case of
            filename.

    Returns:
        str: Transformed `xml` as string.
    """
    transform = ET.XSLT(_read_template(template))
    result_dom = transform(_read_marcxml(xml))
    return ET.tostring(result_dom, pretty_print=True, encoding="utf-8")
|
def iter_links(self):  # type: () -> Iterable[Link]
    """Yields all links in the page"""
    encoding = _get_encoding_from_headers(self.headers)
    document = html5lib.parse(
        self.content,
        transport_encoding=encoding,
        namespaceHTMLElements=False,
    )
    base_url = _determine_base_url(document, self.url)
    for anchor in document.findall(".//a"):
        href = anchor.get("href")
        if not href:
            continue
        url = _clean_link(urllib_parse.urljoin(base_url, href))
        pyrequire = anchor.get('data-requires-python')
        pyrequire = unescape(pyrequire) if pyrequire else None
        yield Link(url, self.url, requires_python=pyrequire)
|
def table_to_intermediary(table):
    """Transform an SQLAlchemy Table object to it's intermediary representation."""
    # NOTE(review): table.c._data is a private SQLAlchemy attribute -- verify
    # against the SQLAlchemy version in use.
    columns = [column_to_intermediary(col) for col in table.c._data.values()]
    return Table(name=table.fullname, columns=columns)
|
def sqrt(self, val, flag):
    """Calculate a square root of ``val`` modulo ``self.p``.

    Supports primes with ``p % 8`` in (3, 5, 7); raises for
    ``p % 8 == 1`` (that case would require Tonelli-Shanks).

    :param val: field element to take the root of (must support ``iszero``,
        ``**``, ``*``, unary ``-`` and expose ``.value``)
    :param flag: desired parity (0 or 1) of the returned root's ``value``;
        of the two roots ``r`` and ``-r`` the one matching is returned
    :return: a square root of ``val`` with the requested parity
    :raises Exception: when ``p % 8 == 1``
    """
    if val.iszero():
        return val
    sw = self.p % 8
    # BUG FIX: use floor division so the exponents stay integers under
    # Python 3 -- with true division "/" they become floats and pow()
    # fails / loses precision.  The divisions are exact (or match the
    # Python 2 floor behavior) for the residue classes handled below.
    if sw == 3 or sw == 7:
        res = val ** ((self.p + 1) // 4)
    elif sw == 5:
        x = val ** ((self.p + 1) // 4)
        if x == 1:
            res = val ** ((self.p + 3) // 8)
        else:
            res = (4 * val) ** ((self.p - 5) // 8) * 2 * val
    else:
        raise Exception("modsqrt non supported for (p%8)==1")
    # pick whichever of the two roots has the requested parity
    if res.value % 2 == flag:
        return res
    else:
        return -res
|
def extendedMeasurementOrder():
    """EXTENDED MEASUREMENT ORDER Section 9.1.51"""
    l2p = L2PseudoLength(l2pLength=0x12)
    pd = TpPd(pd=0x6)
    mes_type = MessageType(mesType=0x37)  # 00110111
    freq_list = ExtendedMeasurementFrequencyList()
    # Stack the layers into one packet
    return l2p / pd / mes_type / freq_list
|
def _get_view_results(self, view):
    """Get the results based on the view"""
    result = self.tree if view == TREE_VIEW else TextResult(tree_results=self.tree)
    # clean up text-style result dictionary
    result.cleanup()
    return result
|
def data(self):
    """Returns the entire configuration as a dict.

    Note that this will force all plugins to be loaded.
    """
    out = {}
    for key in self._data:
        if key == "plugins":
            out[key] = self.plugins.data()
            continue
        try:
            out[key] = getattr(self, key)
        except AttributeError:
            # unknown key, just leave it unchanged
            pass
    return out
|
def write_loudest_events(page, bins, onsource=False):
    """Write injection chisq plots to markup.page object page.

    Builds a table with one column per chirp-mass bin and one row per plot
    type (BestNR-vs-FAP, SNR-vs-FAP, SNR-after-cuts-vs-FAP), then appends a
    link to the loudest offsource triggers page.

    :param page: markup.page object to extend
    :param bins: iterable of (mchirp_low, mchirp_high) pairs
    :param onsource: if True read plots from the ONSOURCE trial directory,
        otherwise from OFFTRIAL_1
    :return: the modified page object
    """
    # table header: one column per chirp-mass bin
    th = [''] + ['Mchirp %s - %s' % tuple(bin) for bin in bins]
    td = []
    plots = ['BestNR', 'SNR']
    if onsource:
        trial = 'ONSOURCE'
    else:
        trial = 'OFFTRIAL_1'
    # one table row per plot type, with a thumbnail link per bin
    for pTag in plots:
        row = pTag.lower()
        d = [pTag]
        for bin in bins:
            b = '%s_%s' % tuple(bin)
            plot = markup.page()
            p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
            plot.a(href=p, title="FAP versus %s" % pTag)
            plot.img(src=p)
            plot.a.close()
            d.append(plot())
        td.append(d)
    # extra row: SNR with analysis cuts applied
    row = 'snruncut'
    d = ['SNR after cuts <br> have been applied']
    for bin in bins:
        b = '%s_%s' % tuple(bin)
        plot = markup.page()
        p = "%s/efficiency/%s_vs_fap_%s.png" % (trial, row, b)
        # NOTE(review): pTag here is left over from the loop above, so this
        # link title always reads "FAP versus SNR" -- confirm this is intended.
        plot.a(href=p, title="FAP versus %s" % pTag)
        plot.img(src=p)
        plot.a.close()
        d.append(plot())
    td.append(d)
    page = write_table(page, th, td)
    page.add('For more details on the loudest offsource events see')
    page.a(href='%s/efficiency/loudest_offsource_trigs.html' % (trial))
    page.add('here.')
    page.a.close()
    return page
|
def create_customer(self, full_name, email):
    """Creating customer user

    :param full_name: str
    :param email: str
    :return: New customer
    """
    log.warning('Creating customer...')
    payload = {'fullName': full_name, 'email': email}
    return self.post(
        'rest/servicedeskapi/customer',
        headers=self.experimental_headers,
        data=payload)
|
def _asarray_tuplesafe ( values ) :
"""Convert values into a numpy array of at most 1 - dimension , while preserving
tuples .
Adapted from pandas . core . common . _ asarray _ tuplesafe"""
|
if isinstance ( values , tuple ) :
result = utils . to_0d_object_array ( values )
else :
result = np . asarray ( values )
if result . ndim == 2 :
result = np . empty ( len ( values ) , dtype = object )
result [ : ] = values
return result
|
def _parse_join(self, tokens):
    """Parses a join.

    Join ::= 'join' '(' SuperRange [',' SuperRange] ')'
    """
    tokens.pop(0)  # 'join'
    tokens.pop(0)  # '('
    children = [self._parse_nested_interval(tokens)]
    while tokens[0] == ',':
        tokens.pop(0)  # ','
        children.append(self._parse_nested_interval(tokens))
    tokens.pop(0)  # ')'
    # chromosome/strand come from the first child; the parent interval
    # spans all children
    first = children[0]
    start = min(child.start.position for child in children)
    stop = max(child.stop.position for child in children)
    parent = NestedGenomicInterval(start, stop, chromosome=first.chromosome, strand=first.strand)
    parent.children = children
    return parent
|
def parse_component_requirement(self, node):
    """Parses <ComponentRequirement>

    @param node: Node containing the <ComponentRequirement> element
    @type node: xml.etree.Element
    """
    try:
        name = node.lattrib['name']
    except KeyError:
        self.raise_error('<ComponentRequirement> must specify a name')
    self.current_component_type.add_component_requirement(ComponentRequirement(name))
|
def export_to_dict(self, recursive=True, include_parent_ref=False, include_defaults=False):
    """Export obj to dictionary.

    Only columns listed in ``self.export_fields`` are exported.

    :param recursive: also export child relationships named in
        ``self.export_children``, each as a stably-sorted list of dicts
    :param include_parent_ref: if False, columns forming the reference back
        to the parent (``cls.export_parent``) are excluded
    :param include_defaults: if False, columns whose value is None or equal
        to the column default are omitted
    :return: dict representation of this instance
    """
    cls = self.__class__
    parent_excludes = {}
    if recursive and not include_parent_ref:
        # identify the local FK columns pointing at the parent so we can
        # skip them in the export
        parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
        if parent_ref:
            parent_excludes = {c.name for c in parent_ref.local_columns}
    # keep a column when it is exportable, not a parent reference, and
    # either defaults were requested or the value is a non-None,
    # non-default value
    dict_rep = {c.name: getattr(self, c.name) for c in cls.__table__.columns if (c.name in self.export_fields and c.name not in parent_excludes and (include_defaults or (getattr(self, c.name) is not None and (not c.default or getattr(self, c.name) != c.default.arg))))}
    if recursive:
        for c in self.export_children:  # sorting to make lists of children stable
            dict_rep[c] = sorted([child.export_to_dict(recursive=recursive, include_parent_ref=include_parent_ref, include_defaults=include_defaults, ) for child in getattr(self, c)], key=lambda k: sorted(k.items()))
    return dict_rep
|
def parse_coaches(self):
    """Parse the home and away coaches

    :returns: ``self`` on success, ``None`` otherwise
    """
    doc = self.html_doc()
    coaches_row = doc.xpath('//tr[@id="HeadCoaches"]')[0]
    for idx, cell in enumerate(coaches_row):
        text = ex_junk(cell.xpath('.//text()'), ['\n', '\r'])
        # first cell is the away coach, the other is home
        side = 'away' if idx == 0 else 'home'
        self.coaches[side] = text[0]
    return self if self.coaches else None
|
def cdx_load(sources, query, process=True):
    """Merge text CDX lines from sources, return an iterator for
    filtered and access-checked sequence of CDX objects.

    :param sources: iterable for text CDX sources.
    :param query: the CDX query controlling processing and output format
    :param process: bool, perform processing sorting/filtering/grouping ops
    """
    cdx_iter = create_merged_cdx_gen(sources, query)
    if query.page_count:
        # page count is a special case, no further processing
        return cdx_iter

    cdx_iter = make_obj_iter(cdx_iter, query)
    if process and not query.secondary_index_only:
        cdx_iter = process_cdx(cdx_iter, query)

    # chain any custom operations supplied by the query
    for op in query.custom_ops:
        cdx_iter = op(cdx_iter, query)

    if query.output == 'text':
        return cdx_to_text(cdx_iter, query.fields)
    if query.output == 'json':
        return cdx_to_json(cdx_iter, query.fields)
    return cdx_iter
|
def _set_keychain(self, v, load=False):
    """Setter method for keychain, mapped from YANG variable /keychain (list).

    Auto-generated pyangbind accessor: coerces ``v`` into the generated
    YANG list container type and stores it on the instance.

    If this variable is read-only (config: false) in the
    source YANG file, then _set_keychain is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_keychain() directly.

    :param v: value to set; must be compatible with the generated list type
    :param load: True when invoked during a config load (passed by the
        generated framework)
    :raises ValueError: if ``v`` cannot be coerced to the YANG list type
    """
    # let typed values coerce themselves first
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("name_of_keychain", keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # re-raise with the full generated-type description for debugging
        raise ValueError({'error-string': """keychain must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("name_of_keychain",keychain.keychain, yang_name="keychain", rest_name="keychain", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name-of-keychain', extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}), is_container='list', yang_name="keychain", rest_name="keychain", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Keychain configuration', u'cli-full-no': None, u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'Keychain', u'cli-mode-name': u'config-$(name-of-keychain)'}}, namespace='urn:brocade.com:mgmt:brocade-keychain', defining_module='brocade-keychain', yang_type='list', is_config=True)""", })
    self.__keychain = t
    # notify the containing tree of the change, if supported
    if hasattr(self, '_set'):
        self._set()
|
def widont(value, count=1):
    """Add an HTML non-breaking space between the final two words of the
    string to avoid "widowed" words.

    :param value: text to process
    :param count: how many trailing word gaps to make non-breaking

    Examples:
        >>> print(widont('Test me out'))
        Test me&nbsp;out
        >>> print("'", widont('It works with trailing spaces too'), "'")
        ' It works with trailing spaces&nbsp;too '
        >>> print(widont('NoEffect'))
        NoEffect
    """
    def _nbsp_join(matchobj):
        return force_text(' %s' % matchobj.group(1))

    for _ in range(count):
        value = re_widont.sub(_nbsp_join, force_text(value))
    return value
|
def count(self):
    """Registered task count

    :return: int
    """
    # total number of tasks across all registry buckets
    return sum(len(tasks) for tasks in self.__registry.values())
|
def upload_string(to_upload, media_type=None, keep_open=False, wait_on_close=False, **kwargs):
    """Upload the data in the string *to_upload* into a new file object
    (with media type *media_type* if given) and return the associated
    remote file handler.

    :param to_upload: String to upload into a file
    :type to_upload: string
    :param media_type: Internet Media Type
    :type media_type: string
    :param keep_open: If False, closes the file after uploading
    :type keep_open: boolean
    :param wait_on_close: If True, waits for the file to close
    :type wait_on_close: boolean
    :returns: Remote file handler
    :rtype: :class:`~dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.
    """
    # Use 'a' mode because we will be responsible for closing the file
    # ourselves later (if requested).
    handler = new_dxfile(media_type=media_type, mode='a', **kwargs)

    # For subsequent API calls, don't supply the dataobject metadata
    # parameters that are only needed at creation time.
    _, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    handler.write(to_upload, **remaining_kwargs)
    if not keep_open:
        handler.close(block=wait_on_close, **remaining_kwargs)
    return handler
|
def FromBinary(cls, record_data, record_count=1):
    """Create an UpdateRecord subclass from binary record data.

    This should be called with a binary record blob (NOT including the
    record type header) and it will decode it into a SetConstantRecord.

    Args:
        record_data (bytearray): The raw record data that we wish to parse
            into an UpdateRecord subclass NOT including its 8 byte record header.
        record_count (int): The number of records included in record_data.

    Raises:
        ArgumentError: If the record_data is malformed and cannot be parsed.

    Returns:
        SetConstantRecord: The decoded set constant record.
    """
    _cmd, address, _resp_length, payload = cls._parse_rpc_info(record_data)
    try:
        value, encoded_stream = struct.unpack("<LH", payload)
        stream = DataStream.FromEncoded(encoded_stream)
    except (ValueError, struct.error):
        # BUG FIX: struct.unpack raises struct.error (not ValueError) on a
        # wrong-sized payload; catch both so malformed records are reported
        # as ArgumentError per the documented contract.
        raise ArgumentError("Could not parse set_constant payload", payload=payload)
    return SetConstantRecord(stream, value, address=address)
|
def overlap(self, x, j=None):
    """Checks how many ellipsoid(s) `x` falls within, skipping the `j`-th
    ellipsoid."""
    return len(self.within(x, j=j))
|
def contour_to_geojson(contour, geojson_filepath=None, min_angle_deg=None, ndigits=5, unit='', stroke_width=1, geojson_properties=None, strdump=False, serialize=True):
    """Transform matplotlib.contour to geojson."""
    line_features = []
    level_idx = 0
    for collection in contour.collections:
        edge_color = collection.get_edgecolor()
        for path in collection.get_paths():
            vertices = path.vertices
            # skip degenerate paths with fewer than 3 vertices
            if len(vertices) < 3:
                continue
            coords = keep_high_angle(vertices, min_angle_deg)
            if ndigits:
                coords = np.around(coords, ndigits)
            level = contour.levels[level_idx]
            properties = {
                "stroke-width": stroke_width,
                "stroke": rgb2hex(edge_color[0]),
                "title": "%.2f" % level + ' ' + unit,
                "level-value": float("%.6f" % level),
                "level-index": level_idx,
            }
            if geojson_properties:
                properties.update(geojson_properties)
            line_features.append(
                Feature(geometry=LineString(coords.tolist()), properties=properties))
        # one contour level per collection
        level_idx += 1
    feature_collection = FeatureCollection(line_features)
    return _render_feature_collection(feature_collection, geojson_filepath, strdump, serialize)
|
def generate_corpus(self, text):
    """Given a text string, returns a list of lists; that is, a list of
    "sentences," each of which is a list of words. Before splitting into
    words, the sentences are filtered through `self.test_sentence_input`.
    """
    if isinstance(text, str):
        sentences = self.sentence_split(text)
    else:
        # iterable of lines: split each line and collect the sentences
        sentences = []
        for chunk in text:
            sentences.extend(self.sentence_split(chunk))
    passing = filter(self.test_sentence_input, sentences)
    return map(self.word_split, passing)
|
def register(linter):
    """Required method to auto register this checker."""
    checker = NewDbFieldWithDefaultChecker(linter)
    linter.register_checker(checker)
    if compat.LOAD_CONFIGURATION_SUPPORTED:
        return
    # older pylint versions need the configuration loaded manually
    load_configuration(linter)
|
def dbStore(self, typ, py_value):
    """Prepares to store this column for the a particular backend database.

    :param typ: backend type identifier
    :param py_value: <variant>
    :return: <variant>
    """
    if not isinstance(py_value, datetime.datetime):
        return super(DatetimeWithTimezoneColumn, self).dbStore(typ, py_value)
    # ensure we have some timezone information before converting to UTC time
    if py_value.tzinfo is None:
        # match the server information
        server_tz = pytz.timezone(orb.system.settings().server_timezone)
        py_value = server_tz.localize(py_value)
    return py_value.astimezone(pytz.utc).replace(tzinfo=None)
|
def set(self, instance, value, **kwargs):
    """writes the value to the same named field on the proxy object"""
    proxy_object = self.get_proxy(instance)
    # Return None if we could not find a proxied object, e.g. through
    # the proxy expression 'context.getSample()' on an AR
    if not proxy_object:
        logger.debug("Expression '{}' did not return a valid Proxy Object on {}".format(self.proxy, instance))
        return None

    # Lookup the identically named field on the proxy object
    field_name = self.getName()
    field = proxy_object.getField(field_name)
    if field is None:
        raise KeyError("Object '{}' with id '{}' has no field named '{}'".format(proxy_object.portal_type, proxy_object.getId(), field_name))

    # set the value on the proxy object
    field.set(proxy_object, value, **kwargs)

    # touch the modification date on both the proxied and holding objects
    now = DateTime.DateTime()
    proxy_object.setModificationDate(now)
    instance.setModificationDate(now)
|
def _generate_notebook_header(notebook_object, notebook_type, notebook_title="Notebook Title", tags="tags", difficulty_stars=1, notebook_description="Notebook Description"):
    """Internal helper that appends the generic header cells to a notebook.

    Parameters
    ----------
    notebook_object : notebook object
        Object of "notebook" class where the header will be created.
    notebook_type : str
        One of the known notebook categories ("Main_Files_Signal_Samples",
        "Main_Files_By_Category", "Main_Files_By_Difficulty",
        "Main_Files_By_Tag", "Acquire", "Open", "Visualise", "Process",
        "Detect", "Extract", "Train_and_Classify", "Explain").
    notebook_title : None or str
        Title shown in the header (meaningful for the non "Main_Files_*"
        types).
    tags : str
        Sequence of tags that characterize the Notebook.
    difficulty_stars : int
        Difficulty level rendered as 1-5 checked stars.
    notebook_description : str
        An introductory text to present the Notebook and involve the reader.
    """
    cells = notebook_object["cells"]
    type_key = str(NOTEBOOK_KEYS[notebook_type])

    # ===== Header cell (image/color depend on the notebook category) =====
    header = HEADER_ALL_CATEGORIES.replace("header_image_color_i", "header_image_color_" + type_key)
    header = header.replace("header_image_i", "header_image_" + type_key)
    header = header.replace("Notebook Title", notebook_title)
    cells.append(nb.v4.new_markdown_cell(header, **{"metadata": {"tags": ["intro_info_title"]}}))

    # ===== "Difficulty" and "Tags" shields =====
    shields = HEADER_TAGS.replace('<td class="shield_right" id="tags">tags</td>', '<td class="shield_right" id="tags">' + "&#9729;".join(tags) + '</td>')
    for star in range(1, 6):
        if star <= difficulty_stars:
            shields = shields.replace("fa fa-star " + str(star), "fa fa-star checked")
        else:
            shields = shields.replace("fa fa-star " + str(star), "fa fa-star")
    cells.append(nb.v4.new_markdown_cell(shields, **{"metadata": {"tags": ["intro_info_tags"]}}))

    # ===== Description cell followed by a separator =====
    cells.append(nb.v4.new_markdown_cell(notebook_description, **{"metadata": {"tags": ["test"]}}))
    cells.append(nb.v4.new_markdown_cell(SEPARATOR))

    # ===== Blank Markdown and Code example cells =====
    cells.append(nb.v4.new_markdown_cell(MD_EXAMPLES))
    cells.append(nb.v4.new_code_cell(CODE_EXAMPLES))
|
def process_tessellate(elem, update_delta, delta, **kwargs):
    """Tessellates surfaces.

    .. note:: Helper function required for ``multiprocessing``

    :param elem: surface
    :type elem: abstract.Surface
    :param update_delta: flag to control evaluation delta updates
    :type update_delta: bool
    :param delta: evaluation delta
    :type delta: list, tuple
    :return: updated surface
    :rtype: abstract.Surface
    """
    if update_delta:
        # apply the new delta and re-evaluate before tessellating
        elem.delta = delta
        elem.evaluate()
    elem.tessellate(**kwargs)
    return elem
|
def flatten_excel(path='.', ext='xlsx', sheetname=0, skiprows=None, header=0, date_parser=parse_date, verbosity=0, output_ext=None):
    """Load all Excel files in the given path, write .flat.csv files, return `DataFrame` dict.

    Arguments:
        path (str): file or folder to retrieve CSV files and `pandas.DataFrame`s from
        ext (str): file name extension (to filter files by)
        date_parser (function): if the MultiIndex can be interpretted as a
            datetime, this parser will be used
        output_ext (str): extra extension inserted before ``ext`` on the
            flattened CSV output files; also used to skip already-flattened
            files on re-runs

    Returns:
        dict of DataFrame: {file_path: flattened_data_frame}
    """
    date_parser = date_parser or (lambda x: x)
    dotted_ext, dotted_output_ext = None, None
    if ext is not None and output_ext is not None:
        dotted_ext = ('' if ext.startswith('.') else '.') + ext
        dotted_output_ext = ('' if output_ext.startswith('.') else '.') + output_ext
    table = {}
    for file_properties in util.find_files(path, ext=ext or '', verbosity=verbosity):
        file_path = file_properties['path']
        # skip files produced by a previous run (they carry the output ext)
        if output_ext and (dotted_output_ext + '.') in file_path:
            continue
        df = dataframe_from_excel(file_path, sheetname=sheetname, header=header, skiprows=skiprows)
        df = flatten_dataframe(df, verbosity=verbosity)
        if dotted_ext is not None and dotted_output_ext is not None:
            df.to_csv(file_path[:-len(dotted_ext)] + dotted_output_ext + dotted_ext)
        # BUG FIX: `table` was previously never populated, so the function
        # always returned {} despite its documented contract.
        table[file_path] = df
    return table
|
def attributes(self) -> Sequence[bytes]:
    """The mailbox attributes that should be returned with the mailbox
    in a ``LIST`` response, e.g. ``\\Noselect``.

    See Also:
        `RFC 3348 <https://tools.ietf.org/html/rfc3348>`_
    """
    flags: List[bytes] = []
    if not self.exists:
        flags.append(b'Noselect')
    flags.append(b'HasChildren' if self.has_children else b'HasNoChildren')
    # three-valued: True -> Marked, False -> Unmarked, None -> neither
    if self.marked is True:
        flags.append(b'Marked')
    elif self.marked is False:
        flags.append(b'Unmarked')
    return flags
|
def GetFrequencyStartTimes(self):
    """Return a list of start times for each headway-based run.

    Returns:
      a sorted list of seconds since midnight, the start time of each run. If
      this trip doesn't have headways returns an empty list.
    """
    times = []
    for headway in self.GetFrequencyTuples():
        begin_secs, finish_secs, interval_secs = headway[:3]
        # Walk from the start of the period, one headway at a time.
        current = begin_secs
        while current < finish_secs:
            times.append(current)
            current += interval_secs
    return times
|
def atan2(y, x, context=None):
    """Return ``atan(y / x)`` with the appropriate choice of function branch.

    Geometrically, ``atan2(y, x)`` is the angle (measured counterclockwise,
    in radians) from the positive x-axis to the line segment joining (0, 0)
    to (x, y).  If ``x > 0`` the result equals ``atan(y / x)``; for ``x < 0``
    the result is shifted by ``±π`` so the correct quadrant is reported.

    Special values follow the ISO C99 / IEEE 754-2008 rules for atan2; the
    function is odd in ``y``: ``atan2(-y, x) == -atan2(y, x)``.
    """
    # Convert both arguments up front, then dispatch to the MPFR primitive
    # in the active (or explicitly supplied) context.
    converted_y = BigFloat._implicit_convert(y)
    converted_x = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat,
        mpfr.mpfr_atan2,
        (converted_y, converted_x),
        context,
    )
|
def _replace_tables(self, soup, v_separator=' | ', h_separator='-'):
    """Replaces <table> elements with their ASCII-art equivalent."""
    sep_width = len(v_separator)
    left_edge = v_separator.lstrip()
    for table in self._parse_tables(soup):
        rows = table['trs']
        # Total line width: one leading char plus a separator per column.
        total_width = 1 + (sep_width * len(table['col_width'])) + table['width']
        border = (h_separator * total_width) + "\n"
        rendered = border
        for row in rows:
            rendered += left_edge
            for idx, cell in enumerate(row):
                content = cell['text']
                width = table['col_width'][idx] + sep_width
                if cell['colspan'] > 1:
                    # A spanning cell absorbs the width of the columns it covers.
                    for extra in range(1, cell['colspan']):
                        if (idx + extra) < len(table['col_width']):
                            width += table['col_width'][idx + extra] + sep_width
                # Right-align the cell text (plus trailing separator) in its slot.
                rendered += ('%' + str(width) + 's') % (content + v_separator)
            rendered += "\n"
        rendered += border
        replacement = soup.new_tag('div')
        replacement.string = rendered
        table['table'].replace_with(replacement)
    return soup
|
def get_new_tag(self, api_tag):
    """Instantiate a new Tag from api data.

    :param api_tag: the api data for the Tag
    :return: the new Tag
    """
    extra_fields = self.api_object_data("tag", api_tag)
    return Tag(site_id=self.site_id, wp_id=api_tag["ID"], **extra_fields)
|
def has_layer(fcollection):
    """Returns true for a multi-layer dict of FeatureCollections."""
    # True as soon as any layer value contains features.
    return any(has_features(layer) for layer in six.viewvalues(fcollection))
|
def safeMkdir(p, permissions=permissions755):
    '''Create directory *p* (no error if it already exists) and set its permissions.

    BUG FIX: the previous ``except OSError: pass`` swallowed *every* OS error
    (permission denied, missing parent directory, ...), not just "directory
    already exists", and the subsequent chmod would then fail confusingly.
    Only EEXIST is ignored now; all other failures propagate.
    '''
    import errno
    try:
        os.mkdir(p)
    except OSError as exc:
        if exc.errno != errno.EEXIST:
            raise
    os.chmod(p, permissions)
|
def execute_commands(self, mapping, *args, **kwargs):
    """Concurrently executes a sequence of commands on a Redis cluster that
    are associated with a routing key, returning a new mapping where
    values are a list of results that correspond to the command in the same
    position. For example::

        >>> cluster.execute_commands({
        ...     'foo': [
        ...         ('PING',),
        ...         ('TIME',),
        ...     ],
        ...     'bar': [
        ...         ('CLIENT', 'GETNAME'),
        ...     ],
        ... })
        {'bar': [<Promise None>],
         'foo': [<Promise True>, <Promise (1454446079, 418404)>]}

    Commands that are instances of :class:`redis.client.Script` will first
    be checked for their existence on the target nodes then loaded on the
    targets before executing and can be interleaved with other commands::

        >>> from redis.client import Script
        >>> TestScript = Script(None, 'return {KEYS, ARGV}')
        >>> cluster.execute_commands({
        ...     'foo': [
        ...         (TestScript, ('key:1', 'key:2'), range(0, 3)),
        ...     ],
        ...     'bar': [
        ...         (TestScript, ('key:3', 'key:4'), range(3, 6)),
        ...     ],
        ... })
        {'bar': [<Promise [['key:3', 'key:4'], ['3', '4', '5']]>],
         'foo': [<Promise [['key:1', 'key:2'], ['0', '1', '2']]>]}

    Internally, :class:`FanoutClient` is used for issuing commands.

    :param mapping: routing key -> sequence of command tuples; a command is
        either ``(name, *args)`` or ``(Script, keys, arguments)``.
    :returns: dict mapping each routing key to a list of result promises,
        one per command, in input order.
    """
    def is_script_command(command):
        # A script command is a tuple whose first element is a Script object.
        return isinstance(command[0], Script)

    def check_script_load_result(script, result):
        # SCRIPT LOAD returns the SHA1 of the stored script; a mismatch means
        # the server stored something other than what we sent.
        if script.sha != result:
            raise AssertionError('Hash mismatch loading {!r}: expected {!r}, got {!r}'.format(script, script.sha, result,))

    # Run through all the commands and check to see if there are any
    # scripts, and whether or not they have been loaded onto the target
    # hosts.  `exists` maps host -> {script: SCRIPT EXISTS promise}.
    exists = {}
    with self.fanout(*args, **kwargs) as client:
        for key, commands in mapping.items():
            targeted = client.target_key(key)
            for command in filter(is_script_command, commands):
                script = command[0]
                # Set the script hash if it hasn't already been set.
                if not script.sha:
                    script.sha = sha1(script.script).hexdigest()
                # Check if the script has been loaded on each host that it
                # will be executed on.
                for host in targeted._target_hosts:
                    if script not in exists.setdefault(host, {}):
                        exists[host][script] = targeted.execute_command('SCRIPT EXISTS', script.sha)

    # Execute the pending commands, loading scripts onto servers where they
    # do not already exist.
    results = {}
    with self.fanout(*args, **kwargs) as client:
        for key, commands in mapping.items():
            results[key] = []
            targeted = client.target_key(key)
            for command in commands:
                # If this command is a script, we need to check and see if
                # it needs to be loaded before execution.
                if is_script_command(command):
                    script = command[0]
                    for host in targeted._target_hosts:
                        if script in exists[host]:
                            # pop() ensures each (host, script) pair is checked
                            # (and SCRIPT LOADed if absent) exactly once.
                            result = exists[host].pop(script)
                            if not result.value[0]:
                                targeted.execute_command('SCRIPT LOAD', script.script).done(on_success=functools.partial(check_script_load_result, script))
                    keys, arguments = command[1:]
                    parameters = list(keys) + list(arguments)
                    results[key].append(targeted.execute_command('EVALSHA', script.sha, len(keys), *parameters))
                else:
                    results[key].append(targeted.execute_command(*command))
    return results
|
def _notify ( self , topic , ** kwargs ) :
"""Invokes callbacks for an event topic .
@ param topic : String event name
@ type topic : str
@ param kwargs : Values associated with the event
@ type kwargs : dict"""
|
for cb in self . _connects . get ( topic , [ ] ) :
try :
cb ( ** kwargs )
except Exception :
if self . _debug :
traceback . print_exc ( )
|
def _add_id_or_name ( flat_path , element_pb , empty_allowed ) :
"""Add the ID or name from an element to a list .
: type flat _ path : list
: param flat _ path : List of accumulated path parts .
: type element _ pb : : class : ` . _ app _ engine _ key _ pb2 . Path . Element `
: param element _ pb : The element containing ID or name .
: type empty _ allowed : bool
: param empty _ allowed : Indicates if neither ID or name need be set . If
: data : ` False ` , then * * exactly * * one of them must be .
: raises : : exc : ` ValueError ` if 0 or 2 of ID / name are set ( unless
` ` empty _ allowed = True ` ` and 0 are set ) ."""
|
id_ = element_pb . id
name = element_pb . name
# NOTE : Below 0 and the empty string are the " null " values for their
# respective types , indicating that the value is unset .
if id_ == 0 :
if name == u"" :
if not empty_allowed :
raise ValueError ( _EMPTY_ELEMENT )
else :
flat_path . append ( name )
else :
if name == u"" :
flat_path . append ( id_ )
else :
msg = _BAD_ELEMENT_TEMPLATE . format ( id_ , name )
raise ValueError ( msg )
|
def match_input_fmt(self, fmt_list):
    """Given a list of Fortran format specifiers, e.g., ['I5', '2X', 'F4.1'],
    construct a flat list of tuples for matching an input string against
    those format specifiers (one or more tuples per specifier).
    """
    matchers = []
    for spec in fmt_list:
        matchers += self.match_input_fmt_1(spec)
    return matchers
|
def plot_and_save(self, data, w=800, h=420, filename='chart', overwrite=True):
    """Save the rendered html to a file and return an IFrame to display the
    plot in the notebook."""
    self.save(data, filename, overwrite)
    html_path = filename + '.html'
    return IFrame(html_path, w, h)
|
def once(ctx, name):
    """Run kibitzr checks once and exit with the run's status code."""
    # Imported lazily so the CLI entry point stays cheap to load.
    from kibitzr.app import Application
    application = Application()
    status = application.run(once=True, log_level=ctx.obj['log_level'], names=name)
    sys.exit(status)
|
def message_details(self, message_id, statistics=False):
    """Gets the details of this message."""
    url = "/transactional/messages/%s?statistics=%s" % (message_id, statistics)
    return json_to_py(self._get(url))
|
def canonicalize_gates(gates: LogicalGates) -> Dict[frozenset, LogicalGates]:
    """Canonicalizes a set of gates by the qubits they act on.

    Takes a set of gates specified by ordered sequences of logical
    indices, and groups those that act on the same qubits regardless of
    order.
    """
    grouped = defaultdict(dict)  # type: DefaultDict[frozenset, LogicalGates]
    for index_seq, gate in gates.items():
        ordered = tuple(index_seq)
        grouped[frozenset(ordered)][ordered] = gate
    # Return plain dict copies so callers don't see defaultdict behavior.
    return {qubits: dict(acting) for qubits, acting in grouped.items()}
|
def ReadHuntFlows(self, hunt_id, offset, count, filter_condition=db.HuntFlowsCondition.UNSET):
    """Reads hunt flows matching given conditions.

    Returns the hunt's flows that satisfy `filter_condition`, sorted by
    last update time and sliced to [offset, offset + count).
    """
    condition = db.HuntFlowsCondition
    if filter_condition == condition.UNSET:
        predicate = lambda _: True
    elif filter_condition == condition.FAILED_FLOWS_ONLY:
        predicate = lambda flow: flow.flow_state == flow.FlowState.ERROR
    elif filter_condition == condition.SUCCEEDED_FLOWS_ONLY:
        predicate = lambda flow: flow.flow_state == flow.FlowState.FINISHED
    elif filter_condition == condition.COMPLETED_FLOWS_ONLY:
        predicate = lambda flow: flow.flow_state in [flow.FlowState.ERROR, flow.FlowState.FINISHED]
    elif filter_condition == condition.FLOWS_IN_PROGRESS_ONLY:
        predicate = lambda flow: flow.flow_state == flow.FlowState.RUNNING
    elif filter_condition == condition.CRASHED_FLOWS_ONLY:
        predicate = lambda flow: flow.flow_state == flow.FlowState.CRASHED
    else:
        raise ValueError("Invalid filter condition: %d" % filter_condition)
    matching = sorted(
        (flow_obj for flow_obj in self._GetHuntFlows(hunt_id) if predicate(flow_obj)),
        key=lambda flow: flow.last_update_time)
    return matching[offset:offset + count]
|
def handle_vcf_calls(vcf_file, data, orig_items):
    """Prioritize VCF calls based on external annotations supplied through GEMINI."""
    if not _do_prioritize(orig_items):
        # Prioritization not requested/applicable for these items.
        return vcf_file
    ann_vcf = population.run_vcfanno(vcf_file, data)
    if not ann_vcf:
        # No annotation data available for filtering; return original file.
        return vcf_file
    priority_file = _prep_priority_filter_vcfanno(ann_vcf, data)
    return _apply_priority_filter(ann_vcf, priority_file, data)
|
def _add_series_or_dataframe_operations(cls):
    """Add the series or dataframe only operations to the cls; evaluate
    the doc strings again.

    Attaches the ``rolling``, ``expanding`` and ``ewm`` window methods to
    *cls*, reusing the docstrings from :mod:`pandas.core.window` via
    ``Appender`` so they stay in sync with the canonical documentation.
    """
    # Imported here (not at module level) to avoid a circular import with
    # pandas.core.window.
    from pandas.core import window as rwindow

    @Appender(rwindow.rolling.__doc__)
    def rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None):
        # Normalize axis aliases (e.g. 'index'/'columns') to integer positions.
        axis = self._get_axis_number(axis)
        return rwindow.rolling(self, window=window, min_periods=min_periods, center=center, win_type=win_type, on=on, axis=axis, closed=closed)
    cls.rolling = rolling

    @Appender(rwindow.expanding.__doc__)
    def expanding(self, min_periods=1, center=False, axis=0):
        axis = self._get_axis_number(axis)
        return rwindow.expanding(self, min_periods=min_periods, center=center, axis=axis)
    cls.expanding = expanding

    @Appender(rwindow.ewm.__doc__)
    def ewm(self, com=None, span=None, halflife=None, alpha=None, min_periods=0, adjust=True, ignore_na=False, axis=0):
        axis = self._get_axis_number(axis)
        return rwindow.ewm(self, com=com, span=span, halflife=halflife, alpha=alpha, min_periods=min_periods, adjust=adjust, ignore_na=ignore_na, axis=axis)
    cls.ewm = ewm
|
def plot_gen_diff(networkA, networkB, leave_out_carriers=['geothermal', 'oil', 'other_non_renewable', 'reservoir', 'waste']):
    """Plot difference in generation between two networks grouped by carrier type.

    Parameters
    ----------
    networkA : PyPSA network container with switches
    networkB : PyPSA network container without switches
    leave_out_carriers : list of carriers to leave out (default to all small
        carriers)

    Returns
    -------
    Plot
    """
    # NOTE(review): the mutable list default is shared across calls; it is
    # only read here, but callers should not mutate it in place.
    def gen_by_c(network):
        # Snapshot-weighted generation per generator; the Slack generator's
        # first column is clipped at zero, then everything is summed per carrier.
        gen = pd.concat([network.generators_t.p.mul(network.snapshot_weightings, axis=0)[network.generators[network.generators.control != 'Slack'].index], network.generators_t.p.mul(network.snapshot_weightings, axis=0)[network.generators[network.generators.control == 'Slack'].index].iloc[:, 0].apply(lambda x: x if x > 0 else 0)], axis=1).groupby(network.generators.carrier, axis=1).sum()
        return gen
    gen = gen_by_c(networkB)
    gen_switches = gen_by_c(networkA)
    # Positive values mean a carrier generates more in networkA than networkB.
    diff = gen_switches - gen
    colors = coloring()
    diff.drop(leave_out_carriers, axis=1, inplace=True)
    colors = [colors[col] for col in diff.columns]
    plot = diff.plot(kind='line', color=colors, use_index=False)
    plot.legend(loc='upper left', ncol=5, prop={'size': 8})
    # Label every timestep position explicitly on the x-axis.
    x = []
    for i in range(0, len(diff)):
        x.append(i)
    plt.xticks(x, x)
    plot.set_xlabel('Timesteps')
    plot.set_ylabel('Difference in Generation in MW')
    plot.set_title('Difference in Generation')
    plt.tight_layout()
|
def exists(self, root):
    """Check to see if the <xs:import/> already exists
    in the specified schema root by matching I{namespace}.

    @param root: A schema root.
    @type root: L{Element}
    @return: True if an import for this namespace is already present.
        (Previously returned 1/0; booleans compare equal to those values,
        so existing callers are unaffected.)
    @rtype: bool
    """
    for node in root.children:
        if node.name != 'import':
            continue
        if node.get('namespace') == self.ns:
            return True
    return False
|
def sweObjectLon(obj, jd):
    """Returns the longitude of an object."""
    swe_id = SWE_OBJECTS[obj]
    result = swisseph.calc_ut(jd, swe_id)
    # First element of the calc_ut result is the longitude.
    return result[0]
|
def revoke(self):
    """LeaseRevoke revokes a lease.

    All keys attached to the lease will expire and be deleted.

    :return: True on completion of the request.
    """
    url = self.client.get_url("/kv/lease/revoke")
    payload = {"ID": self.id}
    self.client.post(url, json=payload)
    return True
|
def search(self, term):
    """Searches the PyPi repository for the given `term` and returns a
    dictionary of results.

    New in 2.1.5: returns a dictionary instead of list of tuples
    """
    found = {}
    # log=False: don't want to log searches.
    raw_output = self._execute_pip(['search', term], log=False)
    for line in raw_output.split(linesep):
        pieces = line.split(six.u(' - '), 1)
        if len(pieces) != 2:
            # No ' - ' marker: this is a continuation line of a
            # multi-line description, not a new package entry.
            continue
        pkg_name = pieces[0].strip()
        if not pkg_name:
            continue
        found[pkg_name] = pieces[1].split(six.u('<br'), 1)[0].strip()
    return found
|
def renew(domain_name, years, promotion_code=None):
    '''Try to renew the specified expiring domain name for a specified number of years.

    domain_name
        The domain name to be renewed

    years
        Number of years to renew

    Returns the following information:

    - Whether or not the domain was renewed successfully
    - The domain ID
    - The order ID
    - The transaction ID
    - The amount charged for renewal

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' namecheap_domains.renew my-domain-name 5
    '''
    request_opts = salt.utils.namecheap.get_opts('namecheap.domains.renew')
    request_opts['DomainName'] = domain_name
    request_opts['Years'] = years
    if promotion_code is not None:
        request_opts['PromotionCode'] = promotion_code
    response_xml = salt.utils.namecheap.post_request(request_opts)
    if response_xml is None:
        # Request failed; an empty dict signals no result to the caller.
        return {}
    result_node = response_xml.getElementsByTagName("DomainRenewResult")[0]
    return salt.utils.namecheap.xml_to_dict(result_node)
|
def get_attrs(self):
    """Retrieve our stored metadata from ``self.attrs`` onto this object."""
    stored = self.attrs
    # Collection-valued attributes fall back to empty containers when unset.
    self.non_index_axes = getattr(stored, 'non_index_axes', None) or []
    self.data_columns = getattr(stored, 'data_columns', None) or []
    self.info = getattr(stored, 'info', None) or dict()
    self.nan_rep = getattr(stored, 'nan_rep', None)
    self.encoding = _ensure_encoding(getattr(stored, 'encoding', None))
    self.errors = _ensure_decoded(getattr(stored, 'errors', 'strict'))
    self.levels = getattr(stored, 'levels', None) or []
    # Split the indexables into index axes and value axes.
    self.index_axes = [axis.infer(self) for axis in self.indexables if axis.is_an_indexable]
    self.values_axes = [axis.infer(self) for axis in self.indexables if not axis.is_an_indexable]
    self.metadata = getattr(stored, 'metadata', None) or []
|
def list_locks(root=None):
    '''List current package locks.

    root
        operate on a different root directory.

    Return a dict containing the locked package with attributes::

        {'<package>': {'case_sensitive': '<case_sensitive>',
                       'match_type': '<match_type>'
                       'type': '<type>'}}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_locks
    '''
    locks = {}
    if root:
        locks_path = os.path.join(root, os.path.relpath(LOCKS, os.path.sep))
    else:
        locks_path = LOCKS
    try:
        with salt.utils.files.fopen(locks_path) as fhr:
            content = salt.utils.stringutils.to_unicode(fhr.read())
        # Lock entries are blank-line separated blocks of "key: value" lines.
        for item in content.split('\n\n'):
            lock = {}
            for element in item.split('\n'):
                if element and ':' in element:
                    key, value = element.split(':', 1)
                    lock[key.strip()] = value.strip()
            if lock.get('solvable_name'):
                locks[lock.pop('solvable_name')] = lock
    except IOError:
        # Missing/unreadable locks file simply means no locks.
        pass
    except Exception:
        log.warning('Detected a problem when accessing %s', locks_path)
    return locks
|
def write_to_sdpa(sdp, filename):
    """Write the SDP relaxation to SDPA sparse format.

    :param sdp: The SDP relaxation to write.
    :type sdp: :class:`ncpol2sdpa.sdp`.
    :param filename: The name of the file. It must have the suffix ".dat-s"
    :type filename: str.
    """
    # Coefficient matrices: map each flat row of F back to (block, i, j)
    # coordinates; row_offsets[b] is the first flat row index of block b.
    row_offsets = [0]
    cumulative_sum = 0
    for block_size in sdp.block_struct:
        cumulative_sum += block_size ** 2
        row_offsets.append(cumulative_sum)
    # A complex-valued problem is embedded as a real one of twice the size.
    multiplier = 1
    if sdp.F.dtype == np.complex128:
        multiplier = 2
    lines = [[] for _ in range(multiplier * sdp.n_vars + 1)]
    for row in range(len(sdp.F.rows)):
        if len(sdp.F.rows[row]) > 0:
            col_index = 0
            block_index, i, j = convert_row_to_sdpa_index(sdp.block_struct, row_offsets, row)
            for k in sdp.F.rows[row]:
                value = sdp.F.data[row][col_index]
                col_index += 1
                if k == 0:
                    # Constant term moves to the right-hand side, sign flipped.
                    value *= -1
                if sdp.F.dtype == np.float64:
                    lines[k].append('{0}\t{1}\t{2}\t{3}\n'.format(block_index + 1, i + 1, j + 1, value))
                else:
                    # Real part fills the diagonal sub-blocks; imaginary part
                    # the (antisymmetric) off-diagonal sub-blocks.
                    bs = sdp.block_struct[block_index]
                    if value.real != 0:
                        lines[k].append('{0}\t{1}\t{2}\t{3}\n'.format(block_index + 1, i + 1, j + 1, value.real))
                        lines[k].append('{0}\t{1}\t{2}\t{3}\n'.format(block_index + 1, i + bs + 1, j + bs + 1, value.real))
                    if value.imag != 0:
                        lines[k + sdp.n_vars].append('{0}\t{1}\t{2}\t{3}\n'.format(block_index + 1, i + 1, j + bs + 1, value.imag))
                        lines[k + sdp.n_vars].append('{0}\t{1}\t{2}\t{3}\n'.format(block_index + 1, j + 1, i + bs + 1, -value.imag))
    # BUG FIX: use a context manager so the file handle is closed even when a
    # write raises (the previous explicit open()/close() pair leaked on error).
    with open(filename, 'w') as file_:
        file_.write('"file ' + filename + ' generated by ncpol2sdpa"\n')
        file_.write(str(multiplier * sdp.n_vars) + ' = number of vars\n')
        # bloc structure
        block_struct = [multiplier * blk_size for blk_size in sdp.block_struct]
        file_.write(str(len(block_struct)) + ' = number of blocs\n')
        file_.write(str(block_struct).replace('[', '(').replace(']', ')'))
        file_.write(' = BlocStructure\n')
        # c vector (objective)
        objective = str(list(sdp.obj_facvar)).replace('[', '').replace(']', '')
        if multiplier == 2:
            objective += ', ' + objective
        file_.write('{' + objective + '}\n')
        for k, line in enumerate(lines):
            if line == []:
                continue
            for item in line:
                file_.write('{0}\t'.format(k) + item)
|
def workflow_stop(obj, names):
    """Stop one or more running workflows.

    NAMES: The names, ids or job ids of the workflows that should be stopped.
    Leave empty to stop all running workflows.
    """
    if names:
        msg = '\n{}\n\n{}'.format('\n'.join(names), 'Would you like to stop these jobs?')
    else:
        msg = 'Would you like to stop all workflows?'
    # confirm() aborts the command unless the user answers yes.
    if click.confirm(msg, default=True, abort=True):
        stop_workflow(obj['config'], names=names or None)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.