signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def delete_column(self, id_or_name):
    """Delete a column identified by its id or name.

    :param id_or_name: the id or name of the column to delete
    :return: True on success, False on failure
    :rtype: bool
    """
    endpoint = self._endpoints.get('delete_column')
    url = self.build_url(endpoint.format(id=quote(id_or_name)))
    return bool(self.session.post(url))
def get(self):
    """Return a JSON-ready dict representation of this TrackingSettings.

    Only the settings that have been assigned (are not None) appear in
    the result.

    :returns: this TrackingSettings, ready for use in a request body
    :rtype: dict
    """
    settings = {}
    candidates = (
        ("click_tracking", self.click_tracking),
        ("open_tracking", self.open_tracking),
        ("subscription_tracking", self.subscription_tracking),
        ("ganalytics", self.ganalytics),
    )
    for key, value in candidates:
        if value is not None:
            settings[key] = value.get()
    return settings
def gen_pager(self, kind, cat_slug, page_num, current):
    """Generate the HTML pager for a category listing.

    :param kind: label kind segment of the URL
    :param cat_slug: category slug (分类)
    :param page_num: total number of pages (页面总数)
    :param current: current page number, 1-based (当前页面)
    :return: HTML for first/prev/numbered/next/last links, or '' when
        there is only a single page
    """
    if page_num == 1:
        return ''
    # 'hidden' hides first/prev on page 1 and next/last on the final page.
    hidden_prev = 'hidden' if current <= 1 else ''
    hidden_next = 'hidden' if current >= page_num else ''
    first_link = '''<li class="{0}"> <a href="/label/{1}/{2}"><< 首页</a>
</li>'''.format(hidden_prev, kind, cat_slug)
    prev_link = '''<li class="{0}"><a href="/label/{1}/{2}/{3}">< 前页</a>
</li>'''.format(hidden_prev, kind, cat_slug, current - 1)
    number_links = ''
    for ind in range(page_num):
        number_links += '''<li class="{0}"><a href="/label/{1}/{2}/{3}">{3}</a>
</li>'''.format('active' if ind + 1 == current else '', kind, cat_slug, ind + 1)
    next_link = '''<li class=" {0}"><a href="/label/{1}/{2}/{3}">后页 ></a>
</li>'''.format(hidden_next, kind, cat_slug, current + 1)
    last_link = '''<li class=" {0}"><a href="/label/{1}/{2}/{3}">末页>></a>
</li>'''.format(hidden_next, kind, cat_slug, page_num)
    return first_link + prev_link + number_links + next_link + last_link
def run(self, input_fname, ids_per_job, stagger=0, **wait_params):
    """Run this submission all the way.

    Starts `submit_reading` on a daemon thread, then blocks on
    `watch_and_wait`.

    :param input_fname: path of the input id file to submit for reading
    :param ids_per_job: number of ids handled per job
    :param stagger: seconds to stagger the job submissions by
    :param wait_params: keyword arguments forwarded to `watch_and_wait`
    """
    submit_thread = Thread(
        target=self.submit_reading,
        args=(input_fname, 0, None, ids_per_job),
        kwargs={'stagger': stagger},
        daemon=True,
    )
    submit_thread.start()
    self.watch_and_wait(**wait_params)
    submit_thread.join(0)
    if submit_thread.is_alive():
        # BUGFIX: the two adjacent string literals were missing a space,
        # producing "...even after jobcompletion.".
        logger.warning("Submit thread is still running even after job "
                       "completion.")
    return
def findFrequentSequentialPatterns(self, dataset):
    """.. note:: Experimental

    Find the complete set of frequent sequential patterns in the input
    sequences of itemsets.

    :param dataset: A dataframe containing a sequence column of
        `ArrayType(ArrayType(T))` type, where T is the item type.
    :return: A `DataFrame` with columns:
        - `sequence: ArrayType(ArrayType(T))` (T is the item type)
        - `freq: Long`

    >>> from pyspark.ml.fpm import PrefixSpan
    >>> from pyspark.sql import Row
    >>> df = sc.parallelize([Row(sequence=[[1, 2], [3]]),
    ...                      Row(sequence=[[1], [3, 2], [1, 2]]),
    ...                      Row(sequence=[[1, 2], [5]]),
    ...                      Row(sequence=[[6]])]).toDF()
    >>> prefixSpan = PrefixSpan(minSupport=0.5, maxPatternLength=5)
    >>> prefixSpan.findFrequentSequentialPatterns(df).sort("sequence").show(truncate=False)

    .. versionadded:: 2.4.0
    """
    # Push the current Python-side params down to the JVM object first.
    self._transfer_params_to_java()
    java_df = self._java_obj.findFrequentSequentialPatterns(dataset._jdf)
    return DataFrame(java_df, dataset.sql_ctx)
def _get_access_token(self, verifier=None):
    """Fetch an access token from `self.access_token_url`.

    :param verifier: optional OAuth verifier passed to the client
    :return: tuple of (oauth.Token, raw token dict)
    :raises OAuthError: when the server does not answer with HTTP 200
    """
    client = self.client(verifier)
    response, content = client.request(self.access_token_url, "POST")
    content = smart_unicode(content)
    if response['status'] != '200':
        raise OAuthError(
            _(u"Invalid status code %s while obtaining access token from %s: %s")
            % (response['status'], self.access_token_url, content))
    token = dict(urlparse.parse_qsl(content))
    return (oauth.Token(token['oauth_token'], token['oauth_token_secret']),
            token)
def _get_column_by_db_name ( cls , name ) :
"""Returns the column , mapped by db _ field name""" | return cls . _columns . get ( cls . _db_map . get ( name , name ) ) |
def normalize_cert_dir():
    """Migrate the old certificate layout to the new one.

    Creates the new config/certificate directories as needed, moves the
    old certificate and private key into the certificates directory
    (named after the current certificate common name), and refreshes
    the symlinks.

    BUGFIX: converted Python-2-only syntax (`print x`, octal `0755`)
    to Python 3 (`print(...)`, `0o755`); same runtime behavior.
    """
    current_cn = get_crt_common_name()

    def _ensure_dir(path, mode):
        # Create the directory with the given mode when it is missing.
        if not os.path.isdir(path):
            print('Need to create {}'.format(path))
            os.mkdir(path, mode)

    _ensure_dir(COZY_CONFIG_PATH, 0o755)
    _ensure_dir(CERTIFICATES_PATH, 0o755)
    _ensure_dir(ACME_PRIVATE_PATH, 0o700)  # private keys: owner-only

    if os.path.isfile(OLD_CERTIFICATE_PATH) and not os.path.islink(OLD_CERTIFICATE_PATH):
        target = '{}/{}.crt'.format(CERTIFICATES_PATH, current_cn)
        # BUGFIX: the original printed CERTIFICATES_PATH as the source;
        # report the file actually being moved (mirrors the key branch).
        print('Move {} to {}'.format(OLD_CERTIFICATE_PATH, target))
        os.rename(OLD_CERTIFICATE_PATH, target)
    else:
        print('Nothing to do for {}'.format(OLD_CERTIFICATE_PATH))

    if os.path.isfile(OLD_PRIVATE_KEY_PATH) and not os.path.islink(OLD_PRIVATE_KEY_PATH):
        target = '{}/{}.key'.format(CERTIFICATES_PATH, current_cn)
        print('Move {} to {}'.format(OLD_PRIVATE_KEY_PATH, target))
        os.rename(OLD_PRIVATE_KEY_PATH, target)
    else:
        print('Nothing to do for {}'.format(OLD_PRIVATE_KEY_PATH))

    if current_cn:
        make_links(current_cn)
def receiver(self, func=None, json=False):
    """Register a receiver function.

    :param func: callable to register (may be None)
    :param json: whether the receiver expects JSON-decoded payloads
    """
    # NOTE(review): nothing is returned, so decorator-style usage would
    # replace the decorated function with None — confirm callers only
    # use this as a plain method call.
    entry = (func, json)
    self.receivers.append(entry)
def _parse_target(target):
    """Parse a binary targeting information structure.

    Only slot-number or controller targeting is supported; anything
    more complicated raises ArgumentError.

    Args:
        target (bytes): The 8-byte binary targeting data blob.

    Returns:
        dict: {'controller': bool, 'slot': int}

    Raises:
        ArgumentError: on wrong length or an unsupported match op.
    """
    if len(target) != 8:
        raise ArgumentError("Invalid targeting data length",
                            expected=8, length=len(target))
    # Byte 0 is the slot number, bytes 1-6 are skipped, byte 7 is the
    # match operation.
    slot, match_op = struct.unpack("<B6xB", target)
    if match_op == _MATCH_CONTROLLER:
        return {'controller': True, 'slot': 0}
    if match_op == _MATCH_SLOT:
        return {'controller': False, 'slot': slot}
    raise ArgumentError("Unsupported complex targeting specified",
                        match_op=match_op)
def run_spyder(app, options, args):
    """Create and show Spyder's main window, then start the Qt event loop.

    :param app: the QApplication instance
    :param options: parsed command-line options for MainWindow
    :param args: external file paths to open after startup
    :return: the MainWindow instance
    """
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        # If setup fails, shut the internal console interpreter down
        # before re-raising so the process can exit cleanly.
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()

    if main.console:
        # Expose a `spy` helper object inside the internal console.
        main.console.shell.interpreter.namespace['spy'] = Spy(app=app,
                                                              window=main)

    # Open external files passed as command-line arguments.
    if args:
        for a in args:
            main.open_external_file(a)

    # Don't show icons in menus on macOS.
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)

    # Open external files with our Mac app.
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)

    # Give focus back to the last focused widget after restoring the
    # window.
    app.focusChanged.connect(main.change_last_focused_widget)

    if not running_under_pytest():
        app.exec_()
    return main
def save_if_changed(self, cancelable=False, index=None):
    """Ask the user to save modified files.

    Args:
        cancelable: Show a Cancel button in the prompt.
        index: Single file to check for modification; all files when
            None.

    Returns:
        False when save() fails or the user cancels.
        True when save() succeeds, nothing is modified, or the user
        answers No or NoToAll.

    Autosave files are removed for files the user chooses not to save.
    """
    if index is None:
        target_indexes = list(range(self.get_stack_count()))
    else:
        target_indexes = [index]

    buttons = QMessageBox.Yes | QMessageBox.No
    if cancelable:
        buttons |= QMessageBox.Cancel

    # Count modified files to decide whether YesToAll/NoToAll apply.
    modified_count = 0
    for index in target_indexes:
        if self.data[index].editor.document().isModified():
            modified_count += 1
    if not modified_count:
        # No file to save.
        return True
    if modified_count > 1:
        buttons |= QMessageBox.YesToAll | QMessageBox.NoToAll

    yes_all = no_all = False
    for index in target_indexes:
        self.set_stack_index(index)
        finfo = self.data[index]
        if finfo.filename == self.tempfile_path or yes_all:
            if not self.save(index):
                return False
        elif no_all:
            self.autosave.remove_autosave_file(finfo)
        elif (finfo.editor.document().isModified()
                and self.save_dialog_on_tests):
            self.msgbox = QMessageBox(
                QMessageBox.Question,
                self.title,
                _("<b>%s</b> has been modified."
                  "<br>Do you want to save changes?")
                % osp.basename(finfo.filename),
                buttons,
                parent=self)
            answer = self.msgbox.exec_()
            if answer == QMessageBox.Yes:
                if not self.save(index):
                    return False
            elif answer == QMessageBox.No:
                self.autosave.remove_autosave_file(finfo)
            elif answer == QMessageBox.YesToAll:
                if not self.save(index):
                    return False
                yes_all = True
            elif answer == QMessageBox.NoToAll:
                self.autosave.remove_autosave_file(finfo)
                no_all = True
            elif answer == QMessageBox.Cancel:
                return False
    return True
def create(cls, name, datacenter, backends, vhosts, algorithm, ssl_enable, zone_alter):
    """Create a webaccelerator.

    :param name: name of the webaccelerator
    :param datacenter: datacenter id or name to create it in
    :param backends: backend servers to attach (optional)
    :param vhosts: virtual hosts to serve (optional)
    :param algorithm: load-balancing algorithm
    :param ssl_enable: whether to enable SSL
    :param zone_alter: whether the DNS zone may be altered
    :return: the API operation result on success
    """
    datacenter_id_ = int(Datacenter.usable_id(datacenter))
    params = {'datacenter_id': datacenter_id_,
              'name': name,
              'lb': {'algorithm': algorithm},
              'override': True,
              'ssl_enable': ssl_enable,
              'zone_alter': zone_alter}
    if vhosts:
        params['vhosts'] = vhosts
    if backends:
        params['servers'] = backends
    try:
        result = cls.call('hosting.rproxy.create', params)
        cls.echo('Creating your webaccelerator %s' % params['name'])
        cls.display_progress(result)
        cls.echo('Your webaccelerator have been created')
        return result
    except Exception as err:
        # 580142: the vhost DNS entries could not be validated.
        # BUGFIX: a generic Exception has no `code` attribute, so the
        # original `err.code` could itself raise AttributeError.
        if getattr(err, 'code', None) == 580142:
            # BUGFIX: 'vhosts' may be absent from params when none were
            # given; don't KeyError while reporting the error.
            for vhost in params.get('vhosts', []):
                dns_entry = cls.call(
                    'hosting.rproxy.vhost.get_dns_entries',
                    {'datacenter': datacenter_id_, 'vhost': vhost})
                txt_record = "@ 3600 IN TXT \"%s=%s\"" % (dns_entry['key'],
                                                          dns_entry['txt'])
                cname_record = "%s 3600 IN CNAME %s" % (dns_entry['key'],
                                                        dns_entry['cname'])
                cls.echo('The domain %s don\'t use Gandi DNS or you have'
                         ' not sufficient right to alter the zone file. '
                         'Edit your zone file adding this TXT and CNAME '
                         'record and try again :' % vhost)
                cls.echo(txt_record)
                cls.echo(cname_record)
                cls.echo('\nOr add a file containing %s at :\n'
                         'http://%s/%s.txt\n'
                         % (dns_entry['txt'], dns_entry['domain'],
                            dns_entry['txt']))
                cls.separator_line('-', 4)
        else:
            cls.echo(err)
def _parse_output(self, output_xml):
    """Parse an output element — generally a switch controlling a set of
    lights/outlets, etc.

    :param output_xml: XML element describing the output
    :return: the constructed Output object
    """
    name = output_xml.get('Name')
    watts = int(output_xml.get('Wattage'))
    output_type = output_xml.get('OutputType')
    integration_id = int(output_xml.get('IntegrationID'))
    return Output(self._lutron, name=name, watts=watts,
                  output_type=output_type, integration_id=integration_id)
def _resolve_looppart(parts, assign_path, context):
    """Recursively resolve multiple assignments made inside loops."""
    # Work on a copy so the caller's path is untouched, and consume one
    # index from the front.
    remaining_path = assign_path[:]
    index = remaining_path.pop(0)
    for part in parts:
        if part is util.Uninferable:
            continue
        if not hasattr(part, "itered"):
            continue
        try:
            itered = part.itered()
        except TypeError:
            continue
        for stmt in itered:
            index_node = nodes.Const(index)
            try:
                assigned = stmt.getitem(index_node, context)
            except (AttributeError,
                    exceptions.AstroidTypeError,
                    exceptions.AstroidIndexError):
                continue
            if not remaining_path:
                # We resolved the full assignment path; don't infer the
                # last part, just yield what was assigned.
                yield assigned
            elif assigned is util.Uninferable:
                break
            else:
                # Not on the last part of the path yet: recurse into
                # every possibly inferred value.
                try:
                    yield from _resolve_looppart(assigned.infer(context),
                                                 remaining_path, context)
                except exceptions.InferenceError:
                    break
def kill_given_tasks(self, task_ids, scale=False, force=None):
    """Kill a list of given tasks.

    :param list[str] task_ids: tasks to kill
    :param bool scale: if true, scale down the app by the number of
        tasks killed
    :param bool force: if true, ignore any current running deployments
    :return: True on success
    :rtype: bool
    """
    params = {'scale': scale}
    if force is not None:
        params['force'] = force
    payload = json.dumps({"ids": task_ids})
    response = self._do_request('POST', '/v2/tasks/delete',
                                params=params, data=payload)
    return response == 200
def return_item_count_on_subpage(self, subpage=1, total_items=1):
    """Return the number of items shown on the given subpage.

    Args:
        subpage: the (1-based) page to compute the count for
        total_items: the total item count

    Returns:
        int: the calculated number of items on that page.
    """
    # Items consumed by all pages before the requested one.
    items_before = (subpage - 1) * self.subpage_items
    if total_items > items_before:
        remaining = total_items - items_before
    else:
        remaining = total_items
    # Enough items remain for a full page, otherwise the remainder.
    return self.subpage_items if remaining >= self.subpage_items else remaining
def weave(*iterables):
    r"""weave(seq1 [, seq2] [...]) -> iterator interleaving the sequences.

    Yields one item from each iterable in turn, round-robin.  The first
    exhausted iterable stops the whole iteration; items already yielded
    in the final round are kept.

    >>> list(weave([1, 2, 3], [4, 5, 6, 'A'], [6, 7, 8, 'B', 'C']))
    [1, 4, 6, 2, 5, 7, 3, 6, 8]
    >>> list(weave(iter(('is', 'psu')), ('there', 'no', 'censorship')))
    ['is', 'there', 'psu', 'no']
    >>> list(weave(('there', 'no', 'censorship'), iter(('is', 'psu'))))
    ['there', 'is', 'no', 'psu', 'censorship']
    """
    # BUGFIX: the original was Python 2 only — it relied on `map(iter,
    # ...)` returning a list and on `.next()`; on Python 3 the map
    # iterator is consumed by the first pass and `.next()` is gone.
    iterators = [iter(it) for it in iterables]
    # BUGFIX: with no iterables the original `while True` spun forever.
    if not iterators:
        return
    while True:
        for it in iterators:
            try:
                yield next(it)
            except StopIteration:
                # PEP 479: StopIteration must not escape a generator,
                # so stop explicitly instead of letting it propagate.
                return
def create_collection(cls, collection, **kwargs):
    """Create a new Collection.

    This method makes a synchronous HTTP request by default.  To make
    an asynchronous HTTP request, pass async=True:

    >>> thread = api.create_collection(collection, async=True)
    >>> result = thread.get()

    :param async bool
    :param Collection collection: Attributes of collection to create (required)
    :return: Collection, or the request thread when called
        asynchronously.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous path: hand back the request thread directly.
        return cls._create_collection_with_http_info(collection, **kwargs)
    data = cls._create_collection_with_http_info(collection, **kwargs)
    return data
def filter_significance(diff, significance):
    """Prune changes from the patch caused by numeric differences below
    the given significance level.

    :param diff: patch dict with a 'changed' list of per-key deltas
    :param significance: threshold handed to _is_significant
    :return: a shallow copy of diff with insignificant changes removed
    """
    # Keep only the significant field-level changes for each key.
    reduced = []
    for delta in diff['changed']:
        significant_fields = {
            field: change
            for field, change in delta['fields'].items()
            if _is_significant(change, significance)
        }
        reduced.append({'key': delta['key'], 'fields': significant_fields})
    # A key counts as changed only if it retains significant changes.
    filtered = [delta for delta in reduced if delta['fields']]
    result = diff.copy()
    result['changed'] = filtered
    return result
def build_input(data, batch_size, dataset, train):
    """Build CIFAR image and label batches.

    Args:
        data: tuple of (images, labels) arrays.
        batch_size: input batch size.
        dataset: "cifar10" or "cifar100" (selects the class count).
        train: True for training-time augmentation and shuffling,
            False for evaluation.

    Returns:
        images: batches of size [batch_size, image_size, image_size, 3].
        labels: one-hot batches of size [batch_size, num_classes].
    """
    image_size = 32
    depth = 3
    num_classes = 10 if dataset == "cifar10" else 100
    images, labels = data
    # Drop the tail so every batch is full.
    num_samples = images.shape[0] - images.shape[0] % batch_size
    dataset = tf.contrib.data.Dataset.from_tensor_slices(
        (images[:num_samples], labels[:num_samples]))

    def map_train(image, label):
        # Pad, random-crop and horizontal flip for augmentation, then
        # standardize per image.
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_size + 4, image_size + 4)
        image = tf.random_crop(image, [image_size, image_size, 3])
        image = tf.image.random_flip_left_right(image)
        image = tf.image.per_image_standardization(image)
        return (image, label)

    def map_test(image, label):
        image = tf.image.resize_image_with_crop_or_pad(
            image, image_size, image_size)
        image = tf.image.per_image_standardization(image)
        return (image, label)

    dataset = dataset.map(map_train if train else map_test)
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat()
    if train:
        dataset = dataset.shuffle(buffer_size=16 * batch_size)

    images, labels = dataset.make_one_shot_iterator().get_next()
    images = tf.reshape(images, [batch_size, image_size, image_size, depth])
    labels = tf.reshape(labels, [batch_size, 1])
    # One-hot encode the labels via sparse_to_dense.
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    labels = tf.sparse_to_dense(tf.concat([indices, labels], 1),
                                [batch_size, num_classes], 1.0, 0.0)

    assert len(images.get_shape()) == 4
    assert images.get_shape()[0] == batch_size
    assert images.get_shape()[-1] == 3
    assert len(labels.get_shape()) == 2
    assert labels.get_shape()[0] == batch_size
    assert labels.get_shape()[1] == num_classes

    if not train:
        tf.summary.image("images", images)
    return images, labels
def get_metadata(main_file):
    """Get metadata about the package/module.

    Positional arguments:
    main_file -- python file path within `HERE` that defines
        __author__, __license__ and __version__ as global variables.

    Returns:
    Dictionary to be passed into setuptools.setup().
    """
    with open(os.path.join(HERE, 'README.md'), encoding='utf-8') as handle:
        long_description = handle.read()

    with open(os.path.join(HERE, main_file), encoding='utf-8') as handle:
        dunder_lines = [line.strip() for line in handle
                        if line.startswith('__')]

    # Rebuild the dunder assignments as a dict literal and evaluate it
    # safely with ast.literal_eval.
    metadata = ast.literal_eval(
        "{'" + ", '".join(l.replace(' = ', "': ") for l in dunder_lines)
        + '}')
    author, license_, version = (
        metadata[k] for k in ('__author__', '__license__', '__version__'))

    everything = dict(version=version, long_description=long_description,
                      author=author, license=license_)
    if not all(everything.values()):
        raise ValueError('Failed to obtain metadata from package/module.')
    return everything
def _generate_event_doc ( event ) :
'''Create a object that will be saved into the database based in
options .''' | # Create a copy of the object that we will return .
eventc = event . copy ( )
# Set the ID of the document to be the JID .
eventc [ "_id" ] = '{}-{}' . format ( event . get ( 'tag' , '' ) . split ( '/' ) [ 2 ] , event . get ( 'tag' , '' ) . split ( '/' ) [ 3 ] )
# Add a timestamp field to the document
eventc [ "timestamp" ] = time . time ( )
# remove any return data as it ' s captured in the " returner " function
if eventc . get ( 'data' ) . get ( 'return' ) :
del eventc [ 'data' ] [ 'return' ]
return eventc |
def fail(self, message=None, force_exit=False):
    """Mark the job as failed, record the error message, and force-exit
    the process when force_exit=True.

    :param message: error message stored in the crash report
    :param force_exit: forwarded to stop() to hard-exit the process
    """
    global last_exit_code
    if not last_exit_code:
        last_exit_code = 1
    with self.git.batch_commit('FAILED'):
        self.set_status('FAILED', add_section=False)
        self.git.commit_json_file(
            'FAIL_MESSAGE', 'aetros/job/crash/error',
            str(message) if message else '')
        # Also capture the tail of stderr when it is our logger wrapper.
        if isinstance(sys.stderr, GeneralLogger):
            self.git.commit_json_file(
                'FAIL_MESSAGE_LAST_LOG', 'aetros/job/crash/last_message',
                sys.stderr.last_messages)
    self.logger.debug('Crash report stored in commit '
                      + self.git.get_head_commit())
    self.stop(JOB_STATUS.PROGRESS_STATUS_FAILED, force_exit=force_exit)
def build_tree(self):
    """Build the CHAID tree.

    Resets the tree store and starts recursive node construction over
    every row index of the dataset.
    """
    self._tree_store = []
    # BUGFIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented equivalent and produces the
    # same default integer dtype.
    row_indices = np.arange(0, self.data_size, dtype=int)
    self.node(row_indices, self.vectorised_array, self.observed)
def with_user_roles(roles):
    """Restrict a view to users whose `role` is in `roles`.

    Requires Flask-Login.  The user model must expose a `role`
    property, which is compared against the provided roles.  Responds
    403 when the role is not allowed and 401 when the user is not
    logged in.

    Usage:
        @app.route('/user')
        @login_require
        @with_user_roles(['admin', 'user'])
        def user_page(self):
            return "You've got permission to access this page."
    """
    def wrapper(view):
        @functools.wraps(view)
        def wrapped(*args, **kwargs):
            # NOTE(review): `is_authenticated` is a property in recent
            # Flask-Login releases; this code calls it — confirm the
            # pinned Flask-Login version exposes it as a method.
            if not current_user.is_authenticated():
                return abort(401)
            if not hasattr(current_user, "role"):
                raise AttributeError("<'role'> doesn't exist in login 'current_user'")
            if current_user.role not in roles:
                return abort(403)
            return view(*args, **kwargs)
        return wrapped
    return wrapper
def border(self, L):
    """Append the border of the subtree to L.

    Leaves whose shape equals L_shape contribute their value; other
    nodes recurse into their children.
    """
    if self.shape == L_shape:
        L.append(self.value)
    else:
        for child in self.sons:
            child.border(L)
def get_transcript(self, exon_bounds='max'):
    """Return a representative Transcript object for this group.

    :param exon_bounds: strategy for the outer exon bounds; 'max' takes
        the widest observed start/end over member transcripts
    :return: a Transcript with junctions and exons populated
    """
    out = Transcript()
    out.junctions = [x.get_junction() for x in self.junction_groups]
    # Single-exon transcript: no junctions, so span the widest bounds
    # observed across all member transcripts.
    if len(out.junctions) == 0:
        leftcoord = min([x.exons[0].range.start for x in self.transcripts])
        rightcoord = max([x.exons[-1].range.end for x in self.transcripts])
        # BUGFIX: the original used the comprehension variable `x` after
        # the comprehension ended — a NameError on Python 3.  Take the
        # chromosome from the first member transcript instead
        # (presumably all members share one chromosome — TODO confirm).
        chrom = self.transcripts[0].exons[0].range.chr
        e = Exon(GenomicRange(chrom, leftcoord, rightcoord))
        e.set_is_leftmost()
        e.set_is_rightmost()
        out.exons.append(e)
        return out
    # Internal exons lie between consecutive junctions.
    self.exons = []
    for i in range(0, len(self.junction_groups) - 1):
        j1 = self.junction_groups[i].get_junction()
        j2 = self.junction_groups[i + 1].get_junction()
        e = Exon(GenomicRange(j1.right.chr, j1.right.end, j2.left.start))
        e.set_left_junc(j1)
        e.set_right_junc(j2)
        out.exons.append(e)
    # Leftmost exon: extend to the earliest observed start.
    left_exons = [y for y in
                  [self.transcripts[e[0]].junctions[e[1]].get_left_exon()
                   for e in self.junction_groups[0].evidence] if y]
    if len(left_exons) == 0:
        sys.stderr.write("ERROR no left exon\n")
        sys.exit()
    e_left = Exon(GenomicRange(out.junctions[0].left.chr,
                               min([x.range.start for x in left_exons]),
                               out.junctions[0].left.start))
    e_left.set_right_junc(out.junctions[0])
    out.exons.insert(0, e_left)
    # Rightmost exon: extend to the latest observed end.
    right_exons = [y for y in
                   [self.transcripts[e[0]].junctions[e[1]].get_right_exon()
                    for e in self.junction_groups[-1].evidence] if y]
    if len(right_exons) == 0:
        sys.stderr.write("ERROR no right exon\n")
        sys.exit()
    e_right = Exon(GenomicRange(out.junctions[-1].right.chr,
                                out.junctions[-1].right.end,
                                max([x.range.end for x in right_exons])))
    e_right.set_left_junc(out.junctions[-1])
    out.exons.append(e_right)
    return out
def _write_adminfile(kwargs):
    '''Create a temporary adminfile based on the keyword arguments
    passed to pkg.install.

    :param kwargs: keyword arguments overriding the adminfile defaults
    :return: path of the temporary adminfile
    '''
    # Adminfile entries with their default values, written in order.
    entries = (
        ('email', ''),
        ('instance', 'quit'),
        ('partial', 'nocheck'),
        ('runlevel', 'nocheck'),
        ('idepend', 'nocheck'),
        ('rdepend', 'nocheck'),
        ('space', 'nocheck'),
        ('setuid', 'nocheck'),
        ('conflict', 'nocheck'),
        ('action', 'nocheck'),
        ('basedir', 'default'),
    )
    # Make a tempfile to hold the adminfile contents.
    adminfile = salt.utils.files.mkstemp(prefix="salt-")
    with salt.utils.files.fopen(adminfile, 'w') as fp_:
        for key, default in entries:
            line = '{0}={1}\n'.format(key, kwargs.get(key, default))
            fp_.write(salt.utils.stringutils.to_str(line))
    return adminfile
def deserialize_bitarray(ser):
    # type: (str) -> bitarray
    """Deserialize a base64-encoded string into a bitarray (Bloom filter)."""
    raw = base64.b64decode(ser.encode(encoding='UTF-8', errors='strict'))
    result = bitarray()
    result.frombytes(raw)
    return result
def detect_cloud() -> Optional[str]:
    '''Detect the cloud provider this host is running on.

    :return: 'amazon', 'google' or 'azure' when detected; None when no
        provider is recognized.  (BUGFIX: the original annotation
        claimed `str`, but None is returned on the unknown path.)
    '''
    # NOTE: Contributions are welcome!
    # Please add other cloud providers such as Rackspace, IBM BlueMix, etc.
    if sys.platform.startswith('linux'):
        # AWS Nitro-based instances expose the vendor on the board.
        try:
            mb = Path('/sys/devices/virtual/dmi/id/board_vendor').read_text().lower()
            if 'amazon' in mb:
                return 'amazon'
        except IOError:
            pass
        # Google Cloud Platform or Amazon AWS (hvm) mark the BIOS.
        try:
            bios = Path('/sys/devices/virtual/dmi/id/bios_version').read_text().lower()
            if 'google' in bios:
                return 'google'
            if 'amazon' in bios:
                return 'amazon'
        except IOError:
            pass
        # Microsoft Azure
        # https://gallery.technet.microsoft.com/scriptcenter/Detect-Windows-Azure-aed06d51
        # TODO: this only works with Debian/Ubuntu instances.
        # TODO: this does not work inside containers.
        try:
            dhcp = Path('/var/lib/dhcp/dhclient.eth0.leases').read_text()
            if 'unknown-245' in dhcp:
                return 'azure'
            # An alternative method is to read
            # /var/lib/waagent/GoalState.1.xml, but it requires sudo
            # privilege.
        except IOError:
            pass
    else:
        log.warning('Cloud detection is implemented for Linux only yet.')
    return None
def valid_totp(token, secret, digest_method=hashlib.sha1, token_length=6,
               interval_length=30, clock=None, window=0):
    """Check if the given token is a valid time-based one-time password
    for the given secret.

    :param token: token which is being checked
    :type token: int or str
    :param secret: secret for which the token is being checked
    :type secret: str
    :param digest_method: method of generating digest (hashlib.sha1 by
        default)
    :type digest_method: callable
    :param token_length: length of the token (6 by default)
    :type token_length: int
    :param interval_length: length of TOTP interval (30 seconds by
        default)
    :type interval_length: int
    :param clock: time in epoch seconds to generate totp for, default
        is now
    :type clock: int
    :param window: compensate for clock skew, number of intervals to
        check on each side of the current time (default is 0 — only
        check the current clock time)
    :type window: int (positive)
    :return: True if the token is valid, False otherwise
    :rtype: bool

    >>> secret = b'MFRGGZDFMZTWQ2LK'
    >>> token = get_totp(secret)
    >>> valid_totp(token, secret)
    True
    >>> valid_totp(token + 1, secret)
    False
    >>> token = get_totp(secret, as_string=True)
    >>> valid_totp(token, secret)
    True
    >>> valid_totp(token + b'1', secret)
    False
    """
    if not _is_possible_token(token, token_length=token_length):
        return False
    if clock is None:
        clock = time.time()
    candidate = int(token)
    # Accept the token when it matches any interval in the skew window.
    for offset in range(-window, window + 1):
        expected = get_totp(secret,
                            digest_method=digest_method,
                            token_length=token_length,
                            interval_length=interval_length,
                            clock=int(clock) + offset * interval_length)
        if candidate == expected:
            return True
    return False
def pbf(self, bbox, geo_col=None, scale=4096):
    """Annotate the queryset with geometries translated and scaled for
    Mapbox vector tiles.

    :param bbox: bounding geometry whose extent defines the tile
    :param geo_col: geometry column name; defaults to the model's geo
        field name
    :param scale: tile coordinate extent (4096 by default)
    """
    column = geo_col or self.geo_field.name
    minx, miny, maxx, maxy = bbox.extent
    # Translate to the tile origin and scale into tile coordinates.
    transformed = self._trans_scale(column, -minx, -miny,
                                    scale / (maxx - minx),
                                    scale / (maxy - miny))
    return self.annotate(pbf=AsText(transformed))
def _get_parsimonious_model ( models ) :
"""Return the most parsimonious model of all available models . The most
parsimonious model is defined as the model with the fewest number of
parameters .""" | params = [ len ( model . output [ "parameters" ] ) for model in models ]
idx = params . index ( min ( params ) )
return models [ idx ] |
def always_fails(self, work_dict):
    """Task that always raises, for exercising failure handling.

    :param work_dict: dictionary for key/values; the "test_failure"
        key, when present, becomes the exception message
    :raises Exception: always
    """
    label = "always_fails"
    log.info(("task - {} - start "
              "work_dict={}").format(label, work_dict))
    raise Exception(work_dict.get("test_failure", "simulating a failure"))
    # Unreachable: retained from the original task template.
    log.info(("task - {} - done").format(label))
    return True
def send(self, obj, encoding='utf-8'):
    """Send a python object to the backend.

    The object **must be JSON serialisable**.

    :param obj: object to send
    :param encoding: encoding used to encode the json message into a
        bytes array; this should match CodeEdit.file.encoding.
    """
    comm('sending request: %r', obj)
    payload = json.dumps(obj).encode(encoding)
    # Length-prefix framing: a 4-byte native-order unsigned int header.
    header = struct.pack('=I', len(payload))
    self.write(header)
    self.write(payload)
def print_extended_help():
    """Print an extended help message listing every <split> parameter."""
    # TextWrapper handles all of the string formatting.
    wrapper = textwrap.TextWrapper()
    wrapper.expand_tabs = False
    wrapper.width = 110
    # NOTE(review): indent widths reflect the whitespace-mangled source;
    # confirm the intended number of spaces.
    wrapper.initial_indent = ' '
    wrapper.subsequent_indent = ' '

    print('')
    print(textwrap.fill("<split> Complete parameter list:", initial_indent=''))
    print('')
    print(wrapper.fill("--input : (required) csv file to split into training and test sets"))
    print(wrapper.fill("\t\tColumns should be as follows:"))
    print('')
    print(wrapper.fill("\t\t id, status, receptor_1, receptor_2, ..., receptor_N"))
    print(wrapper.fill("\t\t CH44, 1, -9.7, -9.3, ..., -10.2"))
    print(wrapper.fill("\t\t ZN44, 0, -6.6, -6.1, ..., -6.8"))
    print('')
    print(wrapper.fill("\t\tid is a unique molecular identifier"))
    print(wrapper.fill("\t\tstatus takes a value of '1' if the molecule is active and '0' otherwise."))
    print(wrapper.fill("\t\treceptor_1 through receptor_N are docking scores."))
    print('')
    print(wrapper.fill(
        "--training_fraction : (optional) The fraction of input active molecules "
        "allocated to the training set, e.g. 0.40. Defaults to allocate half to "
        "the training set."))
    print('')
    print(wrapper.fill(
        "--decoy_to_active : (optional) The decoy to active ratio to establish "
        "in the training and validation sets. Defaults to maintain the input "
        "file ratio."))
    print('')
def refresh(self):
    """Re-download this ItemList from the server and update it in place.

    :rtype: ItemList
    :returns: this ItemList, after the refresh
    :raises: APIError if the API request is not successful
    """
    latest = self.client.get_item_list(self.url())
    self.item_urls = latest.urls()
    self.list_name = latest.name()
    return self
def _parse_packet(rawdata):
    """Return a tuple ``(opcode, payload)``.

    ``opcode`` is None if *rawdata* is not a -conf packet.
    """
    magic_len = len(_MAGIC)
    # A valid packet carries the magic prefix plus at least one opcode byte.
    if len(rawdata) <= magic_len or not rawdata.startswith(_MAGIC):
        return (None, None)
    return (rawdata[magic_len:magic_len + 1], rawdata[magic_len + 1:])
def _create_rule(rule, index, backtrack):
    # type: (Type[Rule], int, Dict[Type[Nonterminal], Type[Rule]]) -> Type[EpsilonRemovedRule]
    """Create an EpsilonRemovedRule that skips the symbol at ``index``.

    :param rule: Original rule.
    :param index: Index of the symbol that is rewritable to epsilon.
    :param backtrack: Dictionary where key is a nonterminal and value is the
        rule which is next used to generate epsilon.
    :return: EpsilonRemovedRule class without the symbol rewritable to epsilon.
    """
    # Copy the class dict, dropping the attributes that describe the original
    # rule's shape -- they are redefined on the new class below. pop() with a
    # default replaces the original chain of 'if key in d: del d[key]' checks.
    old_dict = rule.__dict__.copy()
    for attr in ('rules', 'rule', 'left', 'right', 'fromSymbol', 'toSymbol'):
        old_dict.pop(attr, None)
    # Create the derived rule type.
    created = type('NoEps[' + rule.__name__ + ']', (EpsilonRemovedRule,), old_dict)  # type: Type[EpsilonRemovedRule]
    # Record provenance: the source rule, the removed index, and the backtrack map.
    created.from_rule = rule
    created.replace_index = index
    created.backtrack = backtrack
    # Attach the rewritten rule: same left side, right side minus the removed symbol.
    created.fromSymbol = rule.fromSymbol
    created.right = [symbol for i, symbol in enumerate(rule.right) if i != index]
    # If the right side is now empty, the rule must produce epsilon explicitly.
    if not created.right:
        created.right = [EPSILON]
    return created
def choose_path():
    """Open a native folder-selection dialog.

    :return: JSON response: ``{"status": "ok", "directory": ...}`` on a
        selection, ``{"status": "cancel"}`` otherwise.
    """
    selection = webview.create_file_dialog(webview.FOLDER_DIALOG)
    if not selection:
        return jsonify({"status": "cancel"})
    chosen = selection[0]
    if isinstance(chosen, bytes):
        # Some platforms return raw bytes; normalise to text.
        chosen = chosen.decode("utf-8")
    return jsonify({"status": "ok", "directory": chosen})
def filter(self, value=None, model=None, context=None):
    """Run *value* through every configured filter, in order.

    :param value: a value to filter; ``None`` is passed through untouched
    :param model: parent entity
    :param context: filtering context, usually the parent entity; only
        forwarded when ``self.use_context`` is set
    :return: the filtered value
    """
    if value is None:
        return None
    for entry in self.filters:
        value = entry.filter(
            value=value,
            model=model,
            context=context if self.use_context else None,
        )
    return value
def auth(username, password):
    '''REST authentication: POST the credentials to the configured endpoint.'''
    endpoint = rest_auth_setup()
    payload = {'username': username, 'password': password}
    # POST to the API endpoint. A 200 response carries this user's ACLs
    # (if any) in the decoded body.
    response = salt.utils.http.query(endpoint, method='POST', data=payload,
                                     status=True, decode=True)
    if response['status'] != 200:
        log.debug('eauth REST call failed: %s', response)
        return False
    log.debug('eauth REST call returned 200: %s', response)
    if response['dict'] is not None:
        # The endpoint returned explicit ACLs for this user.
        return response['dict']
    return True
def get_execution_engine(name):
    """Load and return the execution engine registered under *name*."""
    manager = driver.DriverManager(
        namespace='cosmic_ray.execution_engines',
        name=name,
        invoke_on_load=True,
        on_load_failure_callback=_log_extension_loading_failure,
    )
    return manager.driver
def extract_command(outputdir, domain_methods, text_domain, keywords,
                    comment_tags, base_dir, project, version,
                    msgid_bugs_address):
    """Extract translatable strings into ``.pot`` files, one per domain.

    :arg outputdir: output dir for .pot files; usually
        locale/templates/LC_MESSAGES/
    :arg domain_methods: DOMAIN_METHODS setting
    :arg text_domain: TEXT_DOMAIN setting
    :arg keywords: KEYWORDS setting
    :arg comment_tags: COMMENT_TAGS setting
    :arg base_dir: BASE_DIR setting
    :arg project: PROJECT setting
    :arg version: VERSION setting
    :arg msgid_bugs_address: MSGID_BUGS_ADDRESS setting
    """
    # Must monkeypatch first to fix i18n extensions stomping issues!
    monkeypatch_i18n()
    # Make sure the output directory exists.
    outputdir = os.path.abspath(outputdir)
    if not os.path.isdir(outputdir):
        print('Creating output dir %s ...' % outputdir)
        os.makedirs(outputdir)

    def report(filename, method, options):
        # Progress callback invoked for every file babel inspects.
        if method != 'ignore':
            print('  %s' % filename)

    # Extract the strings for each configured domain into its own catalog.
    for domain in domain_methods.keys():
        print('Extracting all strings in domain %s...' % domain)
        catalog = Catalog(header_comment='', project=project, version=version,
                          msgid_bugs_address=msgid_bugs_address, charset='utf-8')
        extracted = extract_from_dir(base_dir,
                                     method_map=domain_methods[domain],
                                     options_map=generate_options_map(),
                                     keywords=keywords,
                                     comment_tags=comment_tags,
                                     callback=report)
        for filename, lineno, msg, cmts, ctxt in extracted:
            catalog.add(msg, None, [(filename, lineno)],
                        auto_comments=cmts, context=ctxt)
        with open(os.path.join(outputdir, '%s.pot' % domain), 'wb') as fp:
            write_po(fp, catalog, width=80)
    print('Done')
def _create_from_java_class(cls, java_class, *args):
    """Instantiate the named Java class with *args* and wrap it in *cls*."""
    jvm_object = JavaWrapper._new_java_obj(java_class, *args)
    return cls(jvm_object)
def _transfer_result ( fut1 , fut2 ) :
"""Helper to transfer result or errors from one Future to another .""" | exc = fut1 . get_exception ( )
if exc is not None :
tb = fut1 . get_traceback ( )
fut2 . set_exception ( exc , tb )
else :
val = fut1 . get_result ( )
fut2 . set_result ( val ) |
def dot(self, y, t=None, A=None, U=None, V=None, kernel=None, check_sorted=True):
    """Dot the covariance matrix into a vector or matrix.

    Compute ``K.y`` where ``K`` is the covariance matrix of the GP without
    the white noise or ``yerr`` values on the diagonal.

    Args:
        y (array[n] or array[n, nrhs]): The vector or matrix ``y``
            described above.
        t (Optional[array[n]]): Input coordinates; must be sorted unless
            ``check_sorted`` is disabled. When omitted, the coordinates
            cached by a previous ``compute`` call are used.
        A, U, V (Optional[array]): extra matrices forwarded to the solver
            alongside the kernel coefficients; empty arrays are substituted
            when not provided. (Presumably the low-rank/diagonal pieces of
            the celerite decomposition -- confirm against the solver docs.)
        kernel (Optional[terms.Term]): A different kernel can optionally
            be provided to compute the matrix ``K`` from a different
            kernel than the ``kernel`` property on this object.
        check_sorted (bool): Whether to validate that ``t`` is sorted and
            one-dimensional.

    Returns:
        array[n] or array[n, nrhs]: The dot product ``K.y`` as described
        above. This will have the same shape as ``y``.

    Raises:
        ValueError: For mismatched dimensions or unsorted coordinates.
        RuntimeError: If ``t`` is omitted and ``compute`` has not been
            called yet.
    """
    # Default to this GP's own kernel unless an alternative was supplied.
    if kernel is None:
        kernel = self.kernel
    if t is not None:
        # Explicit coordinates: validate them rather than using cached state.
        t = np.atleast_1d(t)
        if check_sorted and np.any(np.diff(t) < 0.0):
            raise ValueError("the input coordinates must be sorted")
        if check_sorted and len(t.shape) > 1:
            raise ValueError("dimension mismatch")
        # Substitute empty arrays for the optional matrices so the solver
        # always receives well-formed arguments.
        A = np.empty(0) if A is None else A
        U = np.empty((0, 0)) if U is None else U
        V = np.empty((0, 0)) if V is None else V
    else:
        # No coordinates given: fall back to the state cached by 'compute'.
        if not self.computed:
            raise RuntimeError("you must call 'compute' first")
        t = self._t
        A = self._A
        U = self._U
        V = self._V
    # Unpack the kernel's celerite coefficients in the order the solver expects.
    (alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
     beta_complex_real, beta_complex_imag) = kernel.coefficients
    # Delegate the actual matrix-vector product to the compiled solver;
    # y is made contiguous float64 as the solver requires.
    return self.solver.dot(kernel.jitter, alpha_real, beta_real,
                           alpha_complex_real, alpha_complex_imag,
                           beta_complex_real, beta_complex_imag,
                           A, U, V, t,
                           np.ascontiguousarray(y, dtype=float))
def resume_service(name):
    """Resume (continue) the service given by name.

    @warn: This method requires UAC elevation in Windows Vista and above.
    @note: Not all services support this.
    @see: L{get_services}, L{get_active_services},
        L{start_service}, L{stop_service}, L{pause_service}
    """
    # Connect to the service control manager, open the service with the
    # pause/continue right, and send it the CONTINUE control code.
    with win32.OpenSCManager(dwDesiredAccess=win32.SC_MANAGER_CONNECT) as hSCManager, \
            win32.OpenService(hSCManager, name,
                              dwDesiredAccess=win32.SERVICE_PAUSE_CONTINUE) as hService:
        win32.ControlService(hService, win32.SERVICE_CONTROL_CONTINUE)
def run_job(key, node):
    """Run a job. This applies the function node, and returns a |ResultMessage|
    when complete. If an exception is raised in the job, the |ResultMessage|
    will have ``'error'`` status.

    .. |run_job| replace:: :py:func:`run_job`
    """
    try:
        # Any exception raised while applying the node is packaged up
        # as an 'error' result rather than propagated.
        return ResultMessage(key, 'done', node.apply(), None)
    except Exception as exc:
        return ResultMessage(key, 'error', None, exc)
def message_search(self, text, on_success, peer=None, min_date=None,
                   max_date=None, max_id=None, offset=0, limit=255):
    """Not supported by the Bot API; always raises ``TWXUnsupportedMethod``."""
    raise TWXUnsupportedMethod()
def email(self):
    """User email(s), read from the ``general`` section of the config file.

    :raises ConfigFileError: if the section or the option is missing.
    """
    try:
        return self.parser.get("general", "email")
    except NoOptionError as error:
        log.debug(error)
        raise ConfigFileError("No email address defined in the config file.")
    except NoSectionError as error:
        log.debug(error)
        raise ConfigFileError("No general section found in the config file.")
def parameter_present(name, db_parameter_group_family, description, parameters=None,
                      apply_method="pending-reboot", tags=None, region=None, key=None,
                      keyid=None, profile=None):
    '''Ensure DB parameter group exists and update parameters.

    name
        The name for the parameter group.
    db_parameter_group_family
        The DB parameter group family name. A DB parameter group can be
        associated with one and only one DB parameter group family, and can
        be applied only to a DB instance running a database engine and engine
        version compatible with that DB parameter group family.
    description
        Parameter group description.
    parameters
        The DB parameters that need to be changed, as a list of single-entry
        dictionaries.
    apply_method
        The `apply-immediate` method can be used only for dynamic parameters;
        the `pending-reboot` method can be used with MySQL and Oracle DB
        instances for either dynamic or static parameters. For Microsoft SQL
        Server DB instances, the `pending-reboot` method can be used only for
        static parameters.
    tags
        A dict of tags.
    region
        Region to connect to.
    key
        Secret key to be used.
    keyid
        Access key to be used.
    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    res = __salt__['boto_rds.parameter_group_exists'](name=name, tags=tags, region=region,
                                                      key=key, keyid=keyid, profile=profile)
    if not res.get('exists'):
        # Group is missing: create it (or only report in test mode).
        if __opts__['test']:
            ret['comment'] = 'Parameter group {0} is set to be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto_rds.create_parameter_group'](
            name=name, db_parameter_group_family=db_parameter_group_family,
            description=description, tags=tags, region=region, key=key,
            keyid=keyid, profile=profile)
        if not created:
            ret['result'] = False
            ret['comment'] = 'Failed to create {0} parameter group.'.format(name)
            return ret
        ret['changes']['New Parameter Group'] = name
        ret['comment'] = 'Parameter group {0} created.'.format(name)
    else:
        ret['comment'] = 'Parameter group {0} present.'.format(name)
    if parameters is not None:
        params = {}
        changed = {}
        for items in parameters:
            for k, value in items.items():
                # RDS expects booleans rendered as the strings 'on'/'off'.
                if isinstance(value, bool):
                    params[k] = 'on' if value else 'off'
                else:
                    params[k] = six.text_type(value)
        log.debug('Parameters from user are : %s.', params)
        options = __salt__['boto_rds.describe_parameters'](name=name, region=region,
                                                           key=key, keyid=keyid, profile=profile)
        if not options.get('result'):
            ret['result'] = False
            # BUGFIX: message previously read 'Faled to get parameters...'
            ret['comment'] = os.linesep.join(
                [ret['comment'], 'Failed to get parameters for group {0}.'.format(name)])
            return ret
        # Collect only the parameters whose current value differs from the request.
        for parameter in options['parameters'].values():
            if parameter['ParameterName'] in params and params.get(parameter['ParameterName']) != six.text_type(parameter['ParameterValue']):
                log.debug('Values that are being compared for %s are %s:%s.',
                          parameter['ParameterName'],
                          params.get(parameter['ParameterName']),
                          parameter['ParameterValue'])
                changed[parameter['ParameterName']] = params.get(parameter['ParameterName'])
        if changed:
            if __opts__['test']:
                ret['comment'] = os.linesep.join(
                    [ret['comment'],
                     'Parameters {0} for group {1} are set to be changed.'.format(changed, name)])
                ret['result'] = None
                return ret
            update = __salt__['boto_rds.update_parameter_group'](
                name, parameters=changed, apply_method=apply_method, tags=tags,
                region=region, key=key, keyid=keyid, profile=profile)
            if 'error' in update:
                ret['result'] = False
                ret['comment'] = os.linesep.join(
                    [ret['comment'],
                     'Failed to change parameters {0} for group {1}:'.format(changed, name),
                     update['error']['message']])
                return ret
            ret['changes']['Parameters'] = changed
            ret['comment'] = os.linesep.join(
                [ret['comment'],
                 'Parameters {0} for group {1} are changed.'.format(changed, name)])
        else:
            ret['comment'] = os.linesep.join(
                [ret['comment'],
                 'Parameters {0} for group {1} are present.'.format(params, name)])
    return ret
def minizinc(mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
             globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
             keep=False, output_vars=None, output_base=None, output_mode='dict',
             solver=None, timeout=None, two_pass=None, pre_passes=None,
             output_objective=False, non_unique=False, all_solutions=False,
             num_solutions=None, free_search=False, parallel=None, seed=None,
             rebase_arrays=True, keep_solutions=True, return_enums=False, **kwargs):
    """Implements the workflow for solving a CSP problem encoded with MiniZinc.

    Parameters
    ----------
    mzn : str
        The MiniZinc model: either the path to a ``.mzn`` file or the content
        of the model itself.
    *dzn_files
        Paths to dzn files to attach to the minizinc execution; by default no
        data file is attached.
    args : dict
        Arguments for the template engine.
    data : dict
        Additional variable assignments, converted automatically to dzn format
        via ``pymzn.dict2dzn``.
    include : str or list
        One or more additional paths to search for included ``.mzn`` files.
    stdlib_dir : str
        Non-default MiniZinc standard library directory.
    globals_dir : str
        Non-default MiniZinc globals directory.
    declare_enums : bool
        Whether to declare enum types when converting inline data into dzn
        format. Default ``True``.
    allow_multiple_assignments : bool
        Whether to allow multiple assignments of variables (e.g. letting a
        data file override values in the model). Default ``False``.
    keep : bool
        Whether to keep the generated ``.mzn``/``.dzn``/``.fzn``/``.ozn``
        files. On error the files are kept regardless. Default ``False``.
    output_vars : list of str
        Output variables to include in the output dictionary. Only available
        with ``output_mode='dict'``.
    output_base : str
        Output directory for the files generated by PyMzn.
    output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
        The desired output format; ``'raw'`` returns the unparsed solution
        stream as a single string.
    solver : Solver
        The ``Solver`` instance to use. Defaults to the configured solver
        (gecode).
    timeout : int
        Timeout in seconds for the flattening + solving process.
    two_pass : bool or int
        ``--two-pass`` (True) or ``-O<n>`` (int) for the minizinc executable.
    pre_passes : int
        ``--pre-passes`` option for the minizinc executable.
    output_objective : bool
        Adds a ``_objective`` field to all solutions.
    non_unique : bool
        ``--non-unique`` option for the minizinc executable.
    all_solutions : bool
        Whether all solutions must be returned (solver permitting).
    num_solutions : int
        Upper bound on the number of returned solutions.
    free_search : bool
        Instruct the solver to perform free search.
    parallel : int
        Number of parallel threads the solver may use.
    seed : int
        Random number generator seed passed to the solver.
    rebase_arrays : bool
        Whether to "rebase" parsed arrays. Default ``True``.
    keep_solutions : bool
        Whether to keep solutions in memory; if ``False`` the returned stream
        can only be iterated once.
    return_enums : bool
        Whether to return enum types along with the variable assignments
        (``output_mode='dict'`` only).
    **kwargs
        Additional solver-specific arguments.

    Returns
    -------
    Solutions or str
        A list-like object of solutions, or the whole solution stream as a
        string when ``output_mode='raw'``.
    """
    # Preprocess the model and data: template rendering, dzn conversion,
    # temporary file creation.
    (mzn_file, dzn_files, data_file, data, keep, _output_mode, types) = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments,
        )
    # Fall back to the configured solver (gecode) when none is given.
    if not solver:
        solver = config.get('solver', gecode)
    # Configured solver arguments take precedence over per-call kwargs.
    solver_args = {**kwargs, **config.get('solver_args', {})}
    process = solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments, **solver_args,
    )
    # Temporary files are removed unless the caller asked to keep them.
    if not keep:
        _cleanup([mzn_file, data_file])
    if output_mode == 'raw':
        return process.stdout_data
    solution_parser = SolutionParser(
        solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
        types=types, keep_solutions=keep_solutions, return_enums=return_enums,
    )
    return solution_parser.parse(process)
def main(start, end, out):
    """Scrape MLBAM data.

    :param start: Start day (YYYYMMDD)
    :param end: End day (YYYYMMDD)
    :param out: Output directory (default: "../output/mlb")
    """
    try:
        logging.basicConfig(level=logging.WARNING)
        MlbAm.scrape(start, end, out)
    except MlbAmBadParameter as e:
        # Surface scraper parameter errors as click usage errors.
        raise click.BadParameter(e)
def must_contain(self, value, q, strict=False):
    """Check that *value* contains *q*; report a violation otherwise.

    A ``None`` value is silently accepted.
    """
    if value is None:
        return
    if value.find(q) != -1:
        return
    self.shout('Value %r does not contain %r', strict, value, q)
# NOTE(review): the parameter name `async` became a reserved keyword in
# Python 3.7, so this function only parses on Python <= 3.6. Renaming the
# parameter would break callers passing it by keyword -- confirm the
# supported Python range before changing it.
def success(request, message, extra_tags='', fail_silently=False, async=False):
    """Adds a message with the ``SUCCESS`` level.

    When the global ASYNC switch is enabled and `async` is requested, the
    message is delivered via the async messages backend; otherwise it falls
    back to the regular message machinery.
    """
    if ASYNC and async:
        # Async path: deliver the message to the resolved user.
        messages.success(_get_user(request), message)
    else:
        # Synchronous path: standard add_message with the SUCCESS level.
        add_message(request, constants.SUCCESS, message, extra_tags=extra_tags, fail_silently=fail_silently)
def create_index_table(environ, envdir):
    '''Create an html table listing the tree environments and their symlinks.

    Side effects: creates/removes symlinks under ``envdir`` while building
    the table (via ``make_symlink`` / ``_remove_link``).

    Parameters:
        environ (dict):
            A tree environment dictionary
        envdir (str):
            The filepath for the env directory

    Returns:
        An html table definition string
    '''
    table_header = """<table id="list" cellpadding="0.1em" cellspacing="0">
<colgroup><col width="55%"/><col width="20%"/><col width="25%"/></colgroup>
<thead>
<tr><th><a href="?C=N&O=A">File Name</a> <a href="?C=N&O=D"> &#8595; </a></th><th><a href="?C=S&O=A">File Size</a> <a href="?C=S&O=D"> &#8595; </a></th><th><a href="?C=M&O=A">Date</a> <a href="?C=M&O=D"> &#8595; </a></th></tr>
</thead><tbody>
<tr><td><a href="../">Parent directory/</a></td><td>-</td><td>-</td></tr>"""
    table_footer = """</tbody></table>"""
    # create table, starting with the static header row
    table = table_header
    # loop over the environment sections (each maps tree names to paths)
    for section, values in environ.items():
        if section == 'default':
            continue
        for tree_name, tree_path in values.items():
            skipmsg = 'Skipping {0} for {1}'.format(tree_name, section)
            # '_root' entries are bookkeeping, not linkable trees
            if '_root' in tree_name:
                continue
            # create the src and target links
            src = tree_path
            link = os.path.join(envdir, tree_name.upper())
            # get the local time of the symlink target; a missing target
            # means the stale link (if any) must be removed
            try:
                stattime = time.strftime('%d-%b-%Y %H:%M', time.localtime(os.stat(src).st_mtime))
            except OSError:
                print("{0} does not appear to exist, skipping...".format(src))
                _remove_link(link)
                continue
            # skip the sas_base_dir
            if section == 'general' and 'sas_base_dir' in tree_name:
                print(skipmsg)
                continue
            # only create symlinks
            if section == 'general' and tree_name in ['cas_load', 'staging_data']:
                # only create links here if the target exists
                if os.path.exists(src):
                    make_symlink(src, link)
                else:
                    print(skipmsg)
            else:
                print('Processing {0} for {1}'.format(tree_name, section))
                make_symlink(src, link)
            # create the table entry only when the link actually exists
            if os.path.exists(link):
                table += ' <tr><td><a href="{0}/">{0}/</a></td><td>-</td><td>{1}</td></tr>\n'.format(tree_name.upper(), stattime)
    table += table_footer
    return table
def send_password_changed_email(self, user):
    """Send the 'password has changed' notification email to *user*."""
    # Bail out unless both email delivery and this notification are enabled.
    if not (self.user_manager.USER_ENABLE_EMAIL
            and self.user_manager.USER_SEND_PASSWORD_CHANGED_EMAIL):
        return
    # Notification emails are sent to the user's primary email address.
    primary = self.user_manager.db_manager.get_primary_user_email_object(user)
    # Render email from templates and send it via the configured EmailAdapter.
    self._render_and_send_email(
        primary.email,
        user,
        self.user_manager.USER_PASSWORD_CHANGED_EMAIL_TEMPLATE,
    )
def register_factory(self, key, factory=_sentinel, scope=NoneScope, allow_overwrite=False):
    '''Create and register a provider for *key* using *factory* and *scope*.

    Can also be used as a decorator (call without a factory).

    :param key: Provider key
    :type key: object
    :param factory: Factory callable
    :type factory: callable
    :param scope: Scope key, factory, or instance
    :type scope: object or callable
    :param allow_overwrite: Replace an existing provider instead of raising
    :return: Factory (or a decorator when no factory was supplied)
    :rtype: callable or None
    :raises KeyError: if *key* is registered and overwriting is not allowed
    '''
    if factory is _sentinel:
        # Decorator form: defer registration until the factory is supplied.
        return functools.partial(self.register_factory, key,
                                 scope=scope, allow_overwrite=allow_overwrite)
    if key in self._providers and not allow_overwrite:
        raise KeyError("Key %s already exists" % key)
    self._providers[key] = self.provider(factory, scope)
    return factory
def folder_create(self, foldername=None, parent_key=None, action_on_duplicate=None, mtime=None):
    """folder/create API call.

    http://www.mediafire.com/developers/core_api/1.3/folder/#create
    """
    params = QueryParams({
        'foldername': foldername,
        'parent_key': parent_key,
        'action_on_duplicate': action_on_duplicate,
        'mtime': mtime,
    })
    return self.request('folder/create', params)
def find_frame_urls(self, site, frametype, gpsstart, gpsend, match=None,
                    urltype=None, on_gaps="warn"):
    """Find the framefiles for the given type in the [start, end) interval.

    @param site: single-character name of site to match
    @param frametype: name of frametype to match
    @param gpsstart: integer GPS start time of query
    @param gpsend: integer GPS end time of query
    @param match: regular expression to match against
    @param urltype: file scheme to search for (e.g. 'file')
    @param on_gaps: what to do when the requested frame isn't found, one of:
        - C{'warn'} (default): print a warning,
        - C{'error'}: raise an L{RuntimeError}, or
        - C{'ignore'}: do nothing

    @returns: L{Cache<pycbc_glue.lal.Cache>}
    @raises RuntimeError: if gaps are found and C{on_gaps='error'}
    """
    if on_gaps not in ("warn", "error", "ignore"):
        raise ValueError("on_gaps must be 'warn', 'error', or 'ignore'.")
    # Build the query path: /gwf/<site>/<type>/<start>,<end>[/<urltype>].json[?match=..]
    path = "%s/gwf/%s/%s/%s,%s" % (_url_prefix, site, frametype, gpsstart, gpsend)
    if urltype:
        path += "/%s" % urltype
    path += ".json"
    if match:
        path += "?match=%s" % match
    # Run the query and decode the JSON list of URLs into a LAL cache.
    reply = self._requestresponse("GET", path)
    urllist = decode(reply.read())
    cache = lal.Cache([lal.CacheEntry.from_T050017(x, coltype=self.LIGOTimeGPSType)
                       for x in urllist])
    if on_gaps == "ignore":
        return cache
    # Compare the returned coverage against the requested [start, end) span.
    span = segments.segment(gpsstart, gpsend)
    seglist = segments.segmentlist(e.segment for e in cache).coalesce()
    if span in seglist:
        return cache
    missing = (segments.segmentlist([span]) - seglist).coalesce()
    msg = "Missing segments: \n%s" % "\n".join(map(str, missing))
    if on_gaps == "warn":
        sys.stderr.write("%s\n" % msg)
        return cache
    raise RuntimeError(msg)
def write_copy_button(self, text, text_to_copy):
    """Write a button labelled *text* that copies *text_to_copy* to the
    clipboard when clicked."""
    # Remember that the page needs the clipboard helper script emitted.
    self.write_copy_script = True
    markup = '<button onclick="cp(\'{}\');">{}</button>'.format(text_to_copy, text)
    self.write(markup)
def to_dict(self, prefix=None):
    '''Recursively convert this Config object into a plain dictionary.

    :param prefix: A string to optionally prefix all key elements in the
        returned dictionary.
    '''
    # Shallow-copy the config into a real dict, then let __dictify__ recurse.
    return self.__dictify__(dict(self), prefix)
def _set_cluster_id(self, v, load=False):
    """Setter method for cluster_id, mapped from YANG variable
    /routing_system/router/router_bgp/router_bgp_attributes/cluster_id (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_cluster_id is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_cluster_id() directly.

    NOTE: generated (pyangbind-style) code -- do not hand-edit the YANGDynClass
    arguments. The 'load' parameter is part of the generated setter signature
    and is unused in this body.
    """
    # Values carrying their own YANG type constructor are normalised first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated container class; this raises on
        # any type mismatch, which is converted to ValueError below.
        t = YANGDynClass(v, base=cluster_id.cluster_id, is_container='container', presence=False, yang_name="cluster-id", rest_name="cluster-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route-Reflector Cluster-ID'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """cluster_id must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=cluster_id.cluster_id, is_container='container', presence=False, yang_name="cluster-id", rest_name="cluster-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Route-Reflector Cluster-ID'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='container', is_config=True)""", })
    self.__cluster_id = t
    # Trigger the change hook when the generated class provides one.
    if hasattr(self, '_set'):
        self._set()
def extractLocalParameters(self, dna, bp, helical=False, frames=None):
    """Extract the local base-step (or helical) parameters for calculations.

    .. currentmodule:: dnaMD

    Parameters
    ----------
    dna : :class:`dnaMD.DNA`
        Input :class:`dnaMD.DNA` instance.
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, the 5-50 base-step segment is used.
    helical : bool
        If ``True``, helical base-step parameters are extracted; otherwise
        the standard base-step parameters are extracted.
    frames : list
        List of two trajectory frames between which parameters are
        extracted (e.g. ``frames=[100, 1000]``).

    Returns
    -------
    time : numpy.ndarray
        1D array of shape (nframes,) containing time.
    array : numpy.ndarray
        2D array of shape (6, nframes) containing the extracted parameters.
    """
    frames = self._validateFrames(frames)
    if frames[1] == -1:
        frames[1] = None
    if len(bp) != 2:
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")
    if bp[0] > bp[1]:
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")
    if (bp[1] - bp[0]) > 4:
        print("WARNING: this is a local property and therefore, longer than 4 base-step may not be suitable...")

    # The two branches of the original were identical except for the
    # parameter names; the first three (translational) parameters are
    # converted from Angstrom to nm, the last three (rotational) are not.
    if helical:
        names = ['x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist']
    else:
        names = ['shift', 'slide', 'rise', 'tilt', 'roll', 'twist']

    time = None
    series = []
    for index, name in enumerate(names):
        time, values = dna.time_vs_parameter(name, bp=bp, merge=True, merge_method='sum')
        if index < 3:
            values = np.asarray(values) * 0.1  # conversion to nm
        series.append(values[frames[0]:frames[1]])

    array = np.array(series)
    time = time[frames[0]:frames[1]]
    return time, array
def resolve_solar(self, year: int) -> date:
    """Resolve this term to a concrete date within the given solar year.

    :param year: the solar (Gregorian) year.
    :return: a ``datetime.date`` falling in ``year``.
    """
    if self.date_class == date:
        return self.resolve(year)
    # Non-solar calendar: cast to a solar date, then correct for the case
    # where the resolved date spills into a neighbouring solar year.
    solar_date = LCalendars.cast_date(self.resolve(year), date)
    offset = solar_date.year - year
    if offset:
        solar_date = LCalendars.cast_date(self.resolve(year - offset), date)
    return solar_date
def ldap_server_host_retries(self, **kwargs):
    """Auto Generated Code"""
    config = ET.Element("config")
    ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    host = ET.SubElement(ldap_server, "host")
    # Key leaves first, then the retries leaf itself (pop order preserved).
    for tag, value in (("hostname", kwargs.pop('hostname')),
                       ("use-vrf", kwargs.pop('use_vrf')),
                       ("retries", kwargs.pop('retries'))):
        leaf = ET.SubElement(host, tag)
        leaf.text = value
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _connect(self):
    """Open the TCP socket connection to the JLigier dispatcher."""
    log.debug("Connecting to JLigier")
    endpoint = (self.host, self.port)
    self.socket = socket.socket()
    self.socket.connect(endpoint)
def putf(self, path, format, *args):
    """Equivalent to zconfig_put, but accepting a printf-style format
    specifier and a variable argument list instead of a single
    pre-formatted string value.
    """
    handle = self._as_parameter_
    return lib.zconfig_putf(handle, path, format, *args)
def _fix_file(parameters):
    """Helper for running fix_file(), optionally in parallel.

    ``parameters`` is the full argument tuple for ``fix_file``; its first
    element is the filename and its second the options object.
    """
    filename, options = parameters[0], parameters[1]
    if options.verbose:
        print('[file:{0}]'.format(filename), file=sys.stderr)
    try:
        fix_file(*parameters)
    except IOError as error:
        # NOTE(review): ``unicode`` implies this code still targets
        # Python 2 as well; kept as-is.
        print(unicode(error), file=sys.stderr)
def array(self):
    """Data as a [`numpy.ndarray`][1] of the form ``[[x1, x2, ...], [y1, y2, ...]]``.

    Lazily loaded: the first access calls
    `scipy_data_fitting.Data.load_data` and caches the result.  When
    loaded from file, x and y values are scaled according to
    `scipy_data_fitting.Data.scale`.

    [1]: http://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html
    """
    try:
        return self._array
    except AttributeError:
        self._array = self.load_data()
        return self._array
def parse(self, data):
    # type: (bytes) -> None
    """Parse the passed in data into a UDF Partition Header Descriptor.

    Every allocation descriptor field is required to be zero; a non-zero
    value means an ISO layout this implementation does not support.

    Parameters:
     data - The data to parse.
    Returns:
     Nothing.
    """
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Partition Header Descriptor already initialized')
    (unalloc_table_length, unalloc_table_pos, unalloc_bitmap_length,
     unalloc_bitmap_pos, part_integrity_table_length,
     part_integrity_table_pos, freed_table_length, freed_table_pos,
     freed_bitmap_length, freed_bitmap_pos,
     reserved_unused) = struct.unpack_from(self.FMT, data, 0)
    # All of these fields must be zero; check them in unpack order so the
    # first offending field is the one reported.
    checks = (
        (unalloc_table_length, 'unallocated table length'),
        (unalloc_table_pos, 'unallocated table position'),
        (unalloc_bitmap_length, 'unallocated bitmap length'),
        (unalloc_bitmap_pos, 'unallocated bitmap position'),
        (part_integrity_table_length, 'partition integrity length'),
        (part_integrity_table_pos, 'partition integrity position'),
        (freed_table_length, 'freed table length'),
        (freed_table_pos, 'freed table position'),
        (freed_bitmap_length, 'freed bitmap length'),
        (freed_bitmap_pos, 'freed bitmap position'),
    )
    for value, what in checks:
        if value != 0:
            raise pycdlibexception.PyCdlibInvalidISO('Partition Header %s not 0' % what)
    self._initialized = True
def transform(self, data=None):
    """Return transformed data, or transform new data with this model.

    Parameters
    ----------
    data : numpy array, pandas dataframe or list of arrays/dfs, optional
        The data to transform. If omitted, the stored ``xform_data`` of
        this DataGeometry object is returned.

    Returns
    -------
    xformed_data : list of numpy arrays
        The transformed data.
    """
    if data is None:
        # No new data: return what was transformed at fit time.
        return self.xform_data
    formatted = format_data(data, semantic=self.semantic,
                            vectorizer=self.vectorizer,
                            corpus=self.corpus, ppca=True)
    normalized = normalizer(formatted, normalize=self.normalize)
    reduced = reducer(normalized, reduce=self.reduce,
                      ndims=self.reduce['params']['n_components'])
    return aligner(reduced, align=self.align)
def close(self, nonce: Nonce, balance_hash: BalanceHash, additional_hash: AdditionalHash, signature: Signature, block_identifier: BlockSpecification, ):
    """Closes the channel using the provided balance proof.

    Forwards straight to the token network contract proxy, closing this
    proxy's channel against ``participant2`` (the partner).
    """
    self.token_network.close(channel_identifier=self.channel_identifier, partner=self.participant2, balance_hash=balance_hash, nonce=nonce, additional_hash=additional_hash, signature=signature, given_block_identifier=block_identifier, )
def transform(self, tfms: Optional[Tuple[TfmList, TfmList]] = (None, None), **kwargs):
    "Set `tfms` to be applied to the xs of the train and validation set."
    if not tfms:
        tfms = (None, None)
    assert is_listy(tfms) and len(tfms) == 2, "Please pass a list of two lists of transforms (train and valid)."
    train_tfms, valid_tfms = tfms
    self.train.transform(train_tfms, **kwargs)
    self.valid.transform(valid_tfms, **kwargs)
    if self.test:
        # The test set receives the validation-time transforms.
        self.test.transform(valid_tfms, **kwargs)
    return self
def replace_webhook(self, scaling_group, policy, webhook, name, metadata=None):
    """Replace an existing webhook in full.

    All attributes must be specified; the new values overwrite the old
    ones. To clear an optional attribute, pass it as None.
    """
    # Pure delegation to the manager implementation.
    return self._manager.replace_webhook(scaling_group, policy, webhook,
                                         name, metadata=metadata)
def sample(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout:
    """Sample a batch of experience from the replay buffer.

    Uses forward (n-step) transitions when ``forward_steps > 1``, and
    normalizes/clips rewards when running return statistics are tracked.
    """
    if self.forward_steps > 1:
        transitions = self.replay_buffer.sample_forward_transitions(
            batch_size=number_of_steps, batch_info=batch_info,
            forward_steps=self.forward_steps,
            discount_factor=self.discount_factor)
    else:
        transitions = self.replay_buffer.sample_transitions(
            batch_size=number_of_steps, batch_info=batch_info)
    if self.ret_rms is not None:
        # Scale rewards by the running std of returns, then clip.
        rewards = transitions.transition_tensors['rewards']
        scale = np.sqrt(self.ret_rms.var + 1e-8)
        transitions.transition_tensors['rewards'] = torch.clamp(
            rewards / scale, -self.clip_obs, self.clip_obs)
    return transitions
def normalize_signature(func):
    """Decorator. Combine args and kwargs. Unpack single item tuples.

    Keyword arguments are folded in as the pair ``(args, kwargs)``; when
    exactly one value remains it is unpacked before ``func`` is called
    with that single argument.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs:
            args = args, kwargs
        # BUG FIX: the original used `len(args) is 1` — identity comparison
        # of ints relies on CPython small-int caching; use equality.
        if len(args) == 1:
            args = args[0]
        return func(args)
    return wrapper
def report_entry_label(self, entry_id):
    """Return the best label of the requested entry.

    Parameters
    ----------
    entry_id : int
        The index of the sample to ask.

    Returns
    -------
    label : object
        The best label of the given sample.
    """
    root_pruning = self._find_root_pruning(entry_id)
    best_index = self._best_label(root_pruning)
    return self.classes[best_index]
def run_preassembly(stmts_in, **kwargs):
    """Run preassembly on a list of statements.

    Parameters
    ----------
    stmts_in : list[indra.statements.Statement]
        A list of statements to preassemble.
    return_toplevel : Optional[bool]
        If True, only top-level statements are returned; if False, all
        statements are returned irrespective of specificity. Default: True
    poolsize : Optional[int]
        Number of worker processes used to parallelize comparisons. If
        None (default), no parallelization is performed.
    size_cutoff : Optional[int]
        Groups with at least this many statements are sent to worker
        processes. Default: 100. Only relevant with parallelization.
    belief_scorer : Optional[indra.belief.BeliefScorer]
        BeliefScorer instance used for statement probabilities; the
        default scorer is used when None.
    hierarchies : Optional[dict]
        Dict of hierarchy managers to use for preassembly.
    flatten_evidence : Optional[bool]
        If True, evidences are collected and flattened via
        supports/supported_by links. Default: False
    flatten_evidence_collect_from : Optional[str]
        'supports' or 'supported_by' (default) — which attribute to
        collect flattened evidence from. Only used when flatten_evidence.
    save : Optional[str]
        Pickle file name to save the results (stmts_out) into.
    save_unique : Optional[str]
        Pickle file name to save the unique statements into.

    Returns
    -------
    stmts_out : list[indra.statements.Statement]
        A list of preassembled top-level statements.
    """
    dump_pkl_unique = kwargs.get('save_unique')
    belief_scorer = kwargs.get('belief_scorer')
    use_hierarchies = kwargs.get('hierarchies', hierarchies)
    be = BeliefEngine(scorer=belief_scorer)
    # BUG FIX: the hierarchies selected above were previously ignored and
    # the module-level default was always passed to the Preassembler.
    pa = Preassembler(use_hierarchies, stmts_in)
    run_preassembly_duplicate(pa, be, save=dump_pkl_unique)
    options = {
        'save': kwargs.get('save'),
        'return_toplevel': kwargs.get('return_toplevel', True),
        'poolsize': kwargs.get('poolsize', None),
        'size_cutoff': kwargs.get('size_cutoff', 100),
        'flatten_evidence': kwargs.get('flatten_evidence', False),
        'flatten_evidence_collect_from':
            kwargs.get('flatten_evidence_collect_from', 'supported_by'),
    }
    return run_preassembly_related(pa, be, **options)
def is_snake_case(string, separator='_'):
    """Checks if a string is formatted as snake case.

    A string is considered snake case when:

    * it's composed only of lowercase letters ([a-z]), the separator and
      optionally numbers ([0-9])
    * it does not start/end with the separator
    * it does not start with a number

    :param string: String to test.
    :type string: str
    :param separator: String to use as separator.
    :type separator: str
    :return: True for a snake case string, false otherwise.
    :rtype: bool
    """
    if not is_full_string(string):
        return False
    re_map = {'_': SNAKE_CASE_TEST_RE, '-': SNAKE_CASE_TEST_DASH_RE}
    r = re_map.get(separator)
    if r is None:
        # PERF FIX: the original passed re.compile(...) as the default to
        # dict.get, compiling a fallback regex on every call even when a
        # precompiled pattern was used. Also use a raw string so "\d" is
        # not an invalid escape sequence.
        re_template = r'^[a-z]+([a-z\d]+{sign}|{sign}[a-z\d]+)+[a-z\d]+$'
        r = re.compile(re_template.format(sign=re.escape(separator)))
    return bool(r.search(string))
def post(self, request, format=None):
    """Add a new Channel built from the request payload."""
    data = request.data.copy()
    # Resolve the chat-type foreign key first: the publish-path
    # uniqueness check below depends on it.
    try:
        chat_type = ChatType.objects.get(pk=data.pop("chat_type"))
    except ChatType.DoesNotExist:
        return typeNotFound404
    data["chat_type"] = chat_type
    if not self.is_path_unique(None, data["publish_path"], chat_type.publish_path):
        return notUnique400
    # Resolve the owning user.
    try:
        owner = User.objects.get(pk=data.pop("owner"))
    except User.DoesNotExist:
        return userNotFound404
    data["owner"] = owner
    channel = Channel(**data)
    channel.save()
    self.handle_webhook(channel)
    return Response({
        "text": "Channel saved.",
        "method": "POST",
        "saved": ChannelCMSSerializer(channel).data,
    }, 200, )
def _1_0set_screen_config(self, size_id, rotation, config_timestamp, timestamp=X.CurrentTime):
    """Set the screen to the specified size index and rotation (RandR 1.0)."""
    # NOTE(review): `extname` appears to be a module-level constant — confirm.
    opcode = self.display.get_extension_major(extname)
    return _1_0SetScreenConfig(
        display=self.display,
        opcode=opcode,
        drawable=self,
        timestamp=timestamp,
        config_timestamp=config_timestamp,
        size_id=size_id,
        rotation=rotation,
    )
def column_names(self):
    """Returns the column names.

    Returns
    -------
    out : list[string]
        Column names of the SFrame.
    """
    proxy = self.__graph__.__proxy__
    if self._is_vertex_frame():
        return proxy.get_vertex_fields()
    if self._is_edge_frame():
        return proxy.get_edge_fields()
    # NOTE(review): implicitly returns None when neither predicate holds,
    # matching the original behavior.
def rebuildGrid(self):
    """Rebuilds the ruler data.

    Recomputes grid lines, alternate-row/column highlight rectangles and
    notch label rectangles for both rulers, storing everything into
    ``self._buildData`` for the paint pass.
    """
    vruler = self.verticalRuler()
    hruler = self.horizontalRuler()
    rect = self._buildData['grid_rect']
    # process the vertical ruler
    h_lines = []
    h_alt = []
    h_notches = []
    vpstart = vruler.padStart()
    vnotches = vruler.notches()
    vpend = vruler.padEnd()
    # total row count = notches plus padding cells on both ends
    vcount = len(vnotches) + vpstart + vpend
    deltay = rect.height() / max((vcount - 1), 1)
    # Qt y-axis grows downward, so walk up from the bottom edge.
    y = rect.bottom()
    alt = False
    for i in range(vcount):
        h_lines.append(QLineF(rect.left(), y, rect.right(), y))
        # store alternate color
        if (alt):
            alt_rect = QRectF(rect.left(), y, rect.width(), deltay)
            h_alt.append(alt_rect)
        # store notch information (only rows that map onto a notch index)
        nidx = i - vpstart
        if (0 <= nidx and nidx < len(vnotches)):
            notch = vnotches[nidx]
            # label rectangle sits left of the grid, 3px gap
            notch_rect = QRectF(0, y - 3, rect.left() - 3, deltay)
            h_notches.append((notch_rect, notch))
        y -= deltay
        alt = not alt
    self._buildData['grid_h_lines'] = h_lines
    self._buildData['grid_h_alt'] = h_alt
    self._buildData['grid_h_notches'] = h_notches
    # process the horizontal ruler
    v_lines = []
    v_alt = []
    v_notches = []
    hpstart = hruler.padStart()
    hnotches = hruler.notches()
    hpend = hruler.padEnd()
    hcount = len(hnotches) + hpstart + hpend
    deltax = rect.width() / max((hcount - 1), 1)
    x = rect.left()
    alt = False
    for i in range(hcount):
        v_lines.append(QLineF(x, rect.top(), x, rect.bottom()))
        # store alternate info
        if (alt):
            alt_rect = QRectF(x - deltax, rect.top(), deltax, rect.height())
            v_alt.append(alt_rect)
        # store notch information
        nidx = i - hpstart
        if (0 <= nidx and nidx < len(hnotches)):
            notch = hnotches[nidx]
            # label rectangle centered under the grid line, below the grid
            notch_rect = QRectF(x - (deltax / 2.0), rect.bottom() + 3, deltax, 13)
            v_notches.append((notch_rect, notch))
        x += deltax
        alt = not alt
    self._buildData['grid_v_lines'] = v_lines
    self._buildData['grid_v_alt'] = v_alt
    self._buildData['grid_v_notches'] = v_notches
    # draw the axis lines (left edge and bottom edge of the grid)
    axis_lines = []
    axis_lines.append(QLineF(rect.left(), rect.top(), rect.left(), rect.bottom()))
    axis_lines.append(QLineF(rect.left(), rect.bottom(), rect.right(), rect.bottom()))
    self._buildData['axis_lines'] = axis_lines
def create_default_database(reset: bool = False) -> GraphDatabaseInterface:
    """Creates and returns a default SQLAlchemy database interface to use.

    Arguments:
        reset (bool): Whether to reset the database if it happens to exist already.
    """
    import sqlalchemy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker
    from sqlalchemy.pool import StaticPool

    Base = declarative_base()
    # StaticPool keeps a single connection to the SQLite file.
    engine = sqlalchemy.create_engine("sqlite:///SpotifyArtistGraph.db",
                                      poolclass=StaticPool)
    session = sessionmaker(bind=engine)()
    dbi: GraphDatabaseInterface = create_graph_database_interface(
        sqlalchemy, session, Base, sqlalchemy.orm.relationship)
    if reset:
        Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    return dbi
def add_if_unique(self, name):
    """Atomically add ``name`` to the namespace.

    Returns ``True`` on success, ``False`` if the name already exists.
    """
    with self.lock:
        if name in self.names:
            return False
        self.names.append(name)
        return True
def compile_dir(dfn, optimize_python=True):
    """Byte-compile every *.py in directory `dfn` to *.pyo."""
    if PYTHON is None:
        return
    args = [PYTHON, '-m', 'compileall', '-f', dfn]
    if int(PYTHON_VERSION[0]) >= 3:
        # -b writes byte-code files to their legacy locations.
        args.insert(3, '-b')
    if optimize_python:
        # -OO = strip docstrings
        args.insert(1, '-OO')
    return_code = subprocess.call(args)
    if return_code != 0:
        print('Error while running "{}"'.format(' '.join(args)))
        print('This probably means one of your Python files has a syntax '
              'error, see logs above')
        exit(1)
def get_src_builders(self, env):
    """Returns the list of source Builders for this Builder.

    Looks up Builders referenced as strings in the 'BUILDERS' variable of
    the construction environment and caches the result per environment.
    """
    memo_key = id(env)
    memo_dict = self._memo.setdefault('get_src_builders', {})
    if memo_key in memo_dict:
        return memo_dict[memo_key]
    builders = []
    for bld in self.src_builder:
        if SCons.Util.is_String(bld):
            try:
                bld = env['BUILDERS'][bld]
            except KeyError:
                # Unknown builder name: silently skip it, as before.
                continue
        builders.append(bld)
    memo_dict[memo_key] = builders
    return builders
def add_clock(self, timezone, color='lightgreen', show_seconds=None):
    """Add a clock to the grid.

    ``timezone`` is a string representing a valid timezone.
    """
    if show_seconds is None:
        show_seconds = self.options.show_seconds
    clock = Clock(self.app, self.logger, timezone, color=color,
                  font=self.options.font, show_seconds=show_seconds)
    clock.widget.cfg_expand(0x7, 0x7)
    # Place the new clock in row-major order based on how many exist.
    row, col = divmod(len(self.clocks), self.settings.get('columns'))
    self.clocks[timezone] = clock
    self.grid.add_widget(clock.widget, row, col, stretch=1)
def _convert(self, desired_type: Type[T], obj: S, logger: Logger, options: Dict[str, Dict[str, Any]]) -> T:
    """Apply the converters of the chain in order to produce the result.

    Only the last converter is asked for the 'desired type'; every other
    converter produces its own declared ``to_type``.

    :param desired_type:
    :param obj:
    :param logger:
    :param options:
    :return:
    """
    *intermediate, last = self._converters_list
    for converter in intermediate:
        obj = converter.convert(converter.to_type, obj, logger, options)
    return last.convert(desired_type, obj, logger, options)
def parse_endnotes(document, xmlcontent):
    """Parse the endnotes part ('endnotes.xml') into ``document.endnotes``."""
    endnotes = etree.fromstring(xmlcontent)
    document.endnotes = {}
    for note in endnotes.xpath('.//w:endnote', namespaces=NAMESPACES):
        note_id = note.attrib[_name('{{{w}}}id')]
        document.endnotes[note_id] = [
            parse_paragraph(document, paragraph)
            for paragraph in note.xpath('.//w:p', namespaces=NAMESPACES)
        ]
def normalize_datum(self, datum):
    """Convert `datum` into something that umsgpack likes.

    :param datum: something that we want to process with umsgpack
    :return: a packable version of `datum`
    :raises TypeError: if `datum` cannot be packed

    Called by :meth:`.packb` to recursively normalize an input value
    before passing it to :func:`umsgpack.packb`:

    * ``None`` / ``True`` / ``False`` -> nil / true / false bytes
    * ``int`` / ``float``             -> integer / float family
    * strings                         -> str family
    * ``bytes`` / ``bytearray`` / ``memoryview`` -> bin family
    * objects with ``isoformat`` (dates/datetimes) -> ISO-8601 string
    * ``uuid.UUID``                   -> canonical string form
    * sequences and sets              -> array family
    * mappings                        -> map family
    """
    # Local import keeps this fix self-contained; collections.abc exists
    # on all Python 3 versions this targets.
    import collections.abc
    if datum is None:
        return datum
    if isinstance(datum, self.PACKABLE_TYPES):
        return datum
    if isinstance(datum, uuid.UUID):
        datum = str(datum)
    if isinstance(datum, bytearray):
        datum = bytes(datum)
    if isinstance(datum, memoryview):
        datum = datum.tobytes()
    if hasattr(datum, 'isoformat'):
        datum = datum.isoformat()
    if isinstance(datum, (bytes, str)):
        return datum
    # BUG FIX: the collections.Sequence/Set/Mapping aliases were removed
    # in Python 3.10; the ABCs live in collections.abc.
    if isinstance(datum, (collections.abc.Sequence, collections.abc.Set)):
        return [self.normalize_datum(item) for item in datum]
    if isinstance(datum, collections.abc.Mapping):
        return {key: self.normalize_datum(value)
                for key, value in datum.items()}
    raise TypeError('{} is not msgpackable'.format(datum.__class__.__name__))
def modify(self, **kwargs):
    """Modify settings for a check.

    Provided settings overwrite previous values; settings not provided
    stay the same. To clear an existing value, provide an empty value.
    The type of a check cannot be changed once created.

    General parameters:
        name (str), host (str), paused (bool), resolution (int: 1/5/15/30/60),
        contactids (str, comma separated), sendtoemail (bool), sendtosms (bool),
        sendtotwitter (bool), sendtoiphone (bool), sendtoandroid (bool),
        sendnotificationwhendown (int), notifyagainevery (int),
        notifywhenbackup (bool), use_legacy_notifications (bool),
        probe_filters (str: region:NA / region:EU / region:APAC)

    HTTP: url, encryption, port, auth ("user:password"), shouldcontain,
        shouldnotcontain (mutually exclusive), postdata, requestheader<NAME>
    HTTPCustom: url, encryption, port, auth, additionalurls
    TCP/UDP: port, stringtosend, stringtoexpect
    DNS: expectedip, nameserver
    SMTP/POP3/IMAP: port, auth (SMTP only), stringtoexpect, encryption

    :returns: the API response message string.
    """
    # PERF/idiom: membership tests against a frozenset instead of scanning
    # a long list for every kwarg.  'sendtoandroid' is documented as a
    # valid parameter above and is now accepted without a warning.
    valid_arguments = frozenset([
        'paused', 'resolution', 'contactids', 'sendtoemail', 'sendtosms',
        'sendtotwitter', 'sendtoiphone', 'sendtoandroid',
        'sendnotificationwhendown', 'notifyagainevery', 'notifywhenbackup',
        'created', 'type', 'hostname', 'status', 'lasterrortime',
        'lasttesttime', 'url', 'encryption', 'port', 'auth', 'shouldcontain',
        'shouldnotcontain', 'postdata', 'additionalurls', 'stringtosend',
        'stringtoexpect', 'expectedip', 'nameserver',
        'use_legacy_notifications', 'host', 'alert_policy', 'autoresolve',
        'probe_filters'])
    # Warn user about unhandled parameters
    for key in kwargs:
        if key not in valid_arguments:
            sys.stderr.write("'%s'" % key + ' is not a valid argument of' +
                             '<PingdomCheck>.modify()\n')
    # If one of the legacy parameters is used, it is required to set the
    # legacy flag. https://github.com/KennethWilke/PingdomLib/issues/12
    if any(key in legacy_notification_parameters for key in kwargs):
        if "use_legacy_notifications" in kwargs and kwargs["use_legacy_notifications"] != True:
            raise Exception("Cannot set legacy parameter when use_legacy_notifications is not True")
        kwargs["use_legacy_notifications"] = True
    response = self.pingdom.request("PUT", 'checks/%s' % self.id, kwargs)
    return response.json()['message']
def get_rmq_cluster_status(self, sentry_unit):
    """Run ``rabbitmqctl cluster_status`` on a unit and return its output.

    :param sentry_unit: sentry unit
    :returns: String containing console output of the cluster status command
    """
    cmd = 'rabbitmqctl cluster_status'
    output, _ = self.run_cmd_unit(sentry_unit, cmd)
    unit_name = sentry_unit.info['unit_name']
    self.log.debug('{} cluster_status:\n{}'.format(unit_name, output))
    return str(output)
def commit(func):
    """Decorator that persists the wrapped function's return value.

    The wrapped callable is expected to return a mapped ``songs`` object;
    it is added to a session opened by ``session_withcommit`` and the id
    of the newest row is returned to the caller.

    Fixes over the previous version: positional arguments are now forwarded
    (the old wrapper accepted ``**kwarg`` only), and ``functools.wraps``
    preserves the wrapped function's name and docstring.

    :param func: callable producing the object to persist
    :returns: wrapper returning the highest ``song_id`` after the commit
    """
    from functools import wraps

    @wraps(func)
    def wrap(*args, **kwargs):
        with session_withcommit() as session:
            record = func(*args, **kwargs)
            session.add(record)
            # NOTE(review): returning the max song_id assumes ids are
            # auto-incrementing, so the newest row is the one just added
            # -- confirm against the songs table definition.
            return session.query(songs).order_by(songs.song_id.desc()).first().song_id
    return wrap
def declare_dict(self):
    """Return declared sections, terms and synonyms as a dict."""
    if not self.root:
        # No parse tree yet: exhaust our own iterator, which runs the
        # parser as a side effect and populates the declared collections.
        for _ in self:
            pass
    return dict(
        sections=self._declared_sections,
        terms=self._declared_terms,
        synonyms=self.synonyms,
    )
def get_pyproject(path):
    # type: (Union[STRING_TYPE, Path]) -> Optional[Tuple[List[STRING_TYPE], STRING_TYPE]]
    """Given a base path, look for the corresponding ``pyproject.toml`` file and
    return its build requirements and build backend.

    :param AnyStr path: The root path of the project, should be a directory
        (a file path is truncated to its parent directory)
    :return: A 2-tuple of (build requirements, build backend), or ``None`` when
        the project has neither a ``pyproject.toml`` nor a ``setup.py``
    :rtype: Optional[Tuple[List[AnyStr], AnyStr]]
    """
    if not path:
        return None
    from vistir.compat import Path
    if not isinstance(path, Path):
        path = Path(path)
    if not path.is_dir():
        path = path.parent
    pp_toml = path.joinpath("pyproject.toml")
    setup_py = path.joinpath("setup.py")
    # PEP 517/518 defaults, used whenever the project declares nothing itself.
    default_requires = ["setuptools>=40.8", "wheel"]
    if not pp_toml.exists():
        if not setup_py.exists():
            # Neither pyproject.toml nor setup.py: nothing we can build.
            return None
        requires = default_requires
        backend = get_default_pyproject_backend()
    else:
        with io.open(pp_toml.as_posix(), encoding="utf-8") as fh:
            pyproject_data = tomlkit.loads(fh.read())
        build_system = pyproject_data.get("build-system", None)
        if build_system is None:
            # pyproject.toml exists but has no [build-system] table; the PEP 518
            # defaults apply regardless of whether setup.py is present (the old
            # code had two identical branches here).
            requires = default_requires
            backend = get_default_pyproject_backend()
            # Record the synthesized table under the canonical PEP 518 key
            # ("build-system"; the previous "build_system" key matched nothing).
            pyproject_data["build-system"] = {
                "requires": requires,
                "build-backend": backend,
            }
        else:
            requires = build_system.get("requires", default_requires)
            backend = build_system.get("build-backend", get_default_pyproject_backend())
    return requires, backend
def diff_bisect(self, text1, text2, deadline):
    """Find the 'middle snake' of a diff, split the problem in two
    and return the recursively constructed diff.
    See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      deadline: Time (as returned by time.time()) at which to bail if not
        yet complete.

    Returns:
      Array of diff tuples.
    """
    # Cache the text lengths to prevent multiple calls.
    text1_length = len(text1)
    text2_length = len(text2)
    max_d = (text1_length + text2_length + 1) // 2
    v_offset = max_d
    v_length = 2 * max_d
    # v1/v2 hold, per diagonal k (stored at v_offset + k), the furthest-reaching
    # x coordinate for the forward and reverse searches; -1 means unvisited.
    v1 = [-1] * v_length
    v1[v_offset + 1] = 0
    v2 = v1[:]
    delta = text1_length - text2_length
    # If the total number of characters is odd, then the front path will
    # collide with the reverse path (so overlap is checked on the front walk);
    # if even, the check happens on the reverse walk instead.
    front = (delta % 2 != 0)
    # Offsets for start and end of k loop.
    # Prevents mapping of space beyond the grid.
    k1start = 0
    k1end = 0
    k2start = 0
    k2end = 0
    for d in range(max_d):
        # Bail out if deadline is reached.
        if time.time() > deadline:
            break
        # Walk the front path one step.
        for k1 in range(-d + k1start, d + 1 - k1end, 2):
            k1_offset = v_offset + k1
            # Choose whether to extend down (insert) or right (delete).
            if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]):
                x1 = v1[k1_offset + 1]
            else:
                x1 = v1[k1_offset - 1] + 1
            y1 = x1 - k1
            # Follow the "snake": advance through any run of matching characters.
            while (x1 < text1_length and y1 < text2_length and
                   text1[x1] == text2[y1]):
                x1 += 1
                y1 += 1
            v1[k1_offset] = x1
            if x1 > text1_length:
                # Ran off the right of the graph.
                k1end += 2
            elif y1 > text2_length:
                # Ran off the bottom of the graph.
                k1start += 2
            elif front:
                k2_offset = v_offset + delta - k1
                if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
                    # Mirror x2 onto top-left coordinate system.
                    x2 = text1_length - v2[k2_offset]
                    if x1 >= x2:
                        # Overlap detected.
                        return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
        # Walk the reverse path one step.
        for k2 in range(-d + k2start, d + 1 - k2end, 2):
            k2_offset = v_offset + k2
            if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]):
                x2 = v2[k2_offset + 1]
            else:
                x2 = v2[k2_offset - 1] + 1
            y2 = x2 - k2
            # Reverse snake: match characters from the ends of both strings.
            while (x2 < text1_length and y2 < text2_length and
                   text1[-x2 - 1] == text2[-y2 - 1]):
                x2 += 1
                y2 += 1
            v2[k2_offset] = x2
            if x2 > text1_length:
                # Ran off the left of the graph.
                k2end += 2
            elif y2 > text2_length:
                # Ran off the top of the graph.
                k2start += 2
            elif not front:
                k1_offset = v_offset + delta - k2
                if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
                    x1 = v1[k1_offset]
                    y1 = v_offset + x1 - k1_offset
                    # Mirror x2 onto top-left coordinate system.
                    x2 = text1_length - x2
                    if x1 >= x2:
                        # Overlap detected.
                        return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
    # Diff took too long and hit the deadline or
    # number of diffs equals number of characters, no commonality at all.
    return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.