signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def branches(config, searchstring=""):
    """List all branches, optionally filtered by ``searchstring``.

    When the search matches exactly one branch, interactively offer to
    check it out (default answer is yes).
    """
    repo = config.repo
    matches = list(find(repo, searchstring))
    if not matches:
        if searchstring:
            error_out("Found no branches matching '{}'.".format(searchstring))
        else:
            error_out("Found no branches.")
        return
    merged = get_merged_branches(repo)
    info_out("Found existing branches...")
    print_list(matches, merged)
    if len(matches) == 1 and searchstring:
        only_branch = matches[0]
        # Checking out the branch we are already on makes no sense.
        if repo.active_branch == only_branch:
            error_out("You're already on '{}'".format(only_branch.name))
        display_name = only_branch.name
        # Keep the prompt readable for very long branch names.
        if len(display_name) > 50:
            display_name = display_name[:47] + "…"
        answer = input("Check out '{}'? [Y/n] ".format(display_name))
        if answer.lower().strip() != "n":
            only_branch.checkout()
|
def update_from_schema(cfg, cfgin, schema):
    """Update configuration dictionary ``cfg`` with the contents of ``cfgin``.

    The ``schema`` dictionary determines the valid input keys; keys of
    ``cfgin`` that do not appear in ``schema`` are silently ignored.

    Parameters
    ----------
    cfg : dict
        Configuration dictionary to be updated (not modified in place).
    cfgin : dict
        New configuration dictionary that will be merged with ``cfg``.
    schema : dict
        Configuration schema defining the valid configuration keys and
        their types.  A dict value denotes a nested section; any other
        value is a leaf descriptor whose third element is the leaf type.

    Returns
    -------
    cfgout : dict
        Deep copy of ``cfg`` updated with the accepted values from ``cfgin``.
    """
    cfgout = copy.deepcopy(cfg)
    for key, spec in schema.items():
        if key not in cfgin:
            continue
        if isinstance(spec, dict):
            # Nested section: recurse.  BUGFIX: use .get() so a section that
            # is new in cfgin (absent from cfg) no longer raises KeyError --
            # the original code's setdefault() showed this intent but still
            # indexed cfg[key] directly.
            cfgout[key] = update_from_schema(cfg.get(key, {}), cfgin[key], spec)
        elif spec[2] is dict:
            # Free-form dict leaf: merge instead of replacing wholesale.
            cfgout[key] = utils.merge_dict(cfg.get(key, {}), cfgin[key], add_new_keys=True)
        else:
            cfgout[key] = cfgin[key]
    return cfgout
|
def remove_task_db(self, fs_id):
    '''Delete the task identified by ``fs_id`` from the tasks table.'''
    self.cursor.execute('DELETE FROM tasks WHERE fsid=?', [fs_id])
    # Commit (or defer) according to the instance's commit policy.
    self.check_commit()
|
def check_work_done(self, grp):
    """Check for the existence of alignment and result files.

    Returns a ``(alignment_exists, result_exists)`` tuple of booleans.
    """
    id_ = self.get_id(grp)
    alignment_path = os.path.join(self.cache_dir, '{}.phy'.format(id_))
    result_path = os.path.join(self.cache_dir, '{}.{}.json'.format(id_, self.task_interface.name))
    return os.path.exists(alignment_path), os.path.exists(result_path)
|
def printText(self, stream=None):
    """Print a text representation of this sequence.

    Writes to *stream* when given, otherwise to standard output.
    """
    out = sys.stdout if stream is None else stream
    # Header block: one '# key : value' line per attribute, then a blank line.
    header = [
        '# seqid : %u\n' % self.seqid,
        '# version : %u\n' % self.version,
        '# crc32 : 0x%04x\n' % self.crc32,
        '# ncmds : %u\n' % len(self.commands),
        '# duration: %.3fs\n' % self.duration,
        '\n',
    ]
    for text in header:
        out.write(text)
    for entry in self.lines:
        out.write(str(entry))
    out.write('\n')
|
def get_active_threads_involving_all_participants(self, *participant_ids):
    """Return threads where every given participant is active and nobody has left."""
    threads = (
        Thread.objects
        .exclude(participation__date_left__lte=now())
        .annotate(count_participants=Count('participants'))
        .filter(count_participants=len(participant_ids))
    )
    # One .filter() per participant: each call adds its own join, so the
    # thread must contain *all* of the given participants.
    for pid in participant_ids:
        threads = threads.filter(participants__id=pid)
    return threads.distinct()
|
def convert_file_to_upload_string(i):
    """Read a file and return its content as a URL-safe base64 string.

    Input:  {
              filename - file name to convert
            }
    Output: {
              return - return code = 0, if successful
                       > 0, if error
              (error) - error text if return > 0
              file_content_base64 - string that can be transmitted through Internet
            }
    """
    import base64
    fn = i['filename']
    if not os.path.isfile(fn):
        return {'return': 1, 'error': 'file ' + fn + ' not found'}
    try:
        # Context manager closes the handle even on error; the original
        # manual chunk loop leaked the handle when read() raised.
        with open(fn, 'rb') as f:
            raw = f.read()
    except Exception as e:
        return {'return': 1, 'error': 'error reading file (' + format(e) + ')'}
    encoded = base64.urlsafe_b64encode(raw).decode('utf8')
    return {'return': 0, 'file_content_base64': encoded}
|
def add_database_user(self, new_username, new_password, permissions=None):
    """Add a database user.

    :param permissions: A ``(readFrom, writeTo)`` tuple
    :raises TypeError: if *permissions* is not a two-element tuple.
    :return: ``True`` on success.
    """
    payload = {'name': new_username, 'password': new_password}
    if permissions:
        try:
            payload['readFrom'], payload['writeTo'] = permissions
        except (ValueError, TypeError):
            raise TypeError("'permissions' must be (readFrom, writeTo) tuple")
    self.request(
        url="db/{0}/users".format(self._database),
        method='POST',
        data=payload,
        expected_response_code=200,
    )
    return True
|
def bokeh_palette(name, rawtext, text, lineno, inliner, options=None, content=None):
    '''Generate an inline visual representation of a single color palette.

    The expression ``"palette = %s" % text`` is evaluated in a ``globals``
    namespace that has previously imported all of ``bokeh.plotting``; the
    resulting ``palette`` value is rendered as a sequence of HTML ``<span>``
    elements, one per color.

    A SphinxError is raised (terminating the build) when the expression fails
    to evaluate or does not produce a list or tuple of strings.

    For details on the arguments to this function, consult the Docutils docs:
    http://docutils.sourceforge.net/docs/howto/rst-roles.html#define-the-role-function
    '''
    # NOTE: exec() of documentation-supplied text -- acceptable only because
    # docs builds run on trusted input.
    try:
        exec("palette = %s" % text, _globals)
    except Exception as e:
        raise SphinxError("cannot evaluate palette expression '%r', reason: %s" % (text, e))
    palette = _globals.get('palette', None)
    is_color_sequence = isinstance(palette, (list, tuple)) and all(isinstance(c, str) for c in palette)
    if not is_color_sequence:
        raise SphinxError("palette expression '%r' generated invalid or no output: %s" % (text, palette))
    # Narrower swatches for longer palettes so the row stays a sane width.
    if len(palette) < 15:
        swatch_width = 20
    elif len(palette) < 32:
        swatch_width = 10
    elif len(palette) < 64:
        swatch_width = 5
    elif len(palette) < 128:
        swatch_width = 2
    else:
        swatch_width = 1
    html = PALETTE_DETAIL.render(palette=palette, width=swatch_width)
    node = nodes.raw('', html, format="html")
    return [node], []
|
def scroll_to_end_vertically(self, steps=10, *args, **selectors):
    """Scroll the object which has *selectors* attributes to *end* vertically.

    See `Scroll Forward Vertically` for more details.
    """
    # *args is accepted for call-compatibility but unused here.
    target = self.device(**selectors)
    return target.scroll.vert.toEnd(steps=steps)
|
def transliterate(text, lang1_code, lang2_code):
    """Convert the source language script (lang1) to target language script (lang2).

    text: text to transliterate
    lang1_code: source language code
    lang2_code: target language code

    Returns *text* unchanged when either language has no known script range.
    """
    if lang1_code not in langinfo.SCRIPT_RANGES or lang2_code not in langinfo.SCRIPT_RANGES:
        return text
    # Sinhala is not coordinated with the other ranges: route via Devanagari.
    if lang1_code == 'si':
        text = sdt.sinhala_to_devanagari(text)
        lang1_code = 'hi'
    org_lang2_code = ''
    if lang2_code == 'si':
        org_lang2_code = 'si'
        lang2_code = 'hi'
    source_base = langinfo.SCRIPT_RANGES[lang1_code][0]
    target_base = langinfo.SCRIPT_RANGES[lang2_code][0]
    out_chars = []
    for ch in text:
        offset = ord(ch) - source_base
        if langinfo.COORDINATED_RANGE_START_INCLUSIVE <= offset <= langinfo.COORDINATED_RANGE_END_INCLUSIVE:
            if lang2_code == 'ta':
                # Tamil exceptions: remap code points Tamil does not have.
                offset = UnicodeIndicTransliterator._correct_tamil_mapping(offset)
            out_chars.append(py23char(target_base + offset))
        else:
            out_chars.append(ch)
    result = ''.join(out_chars)
    if org_lang2_code == 'si':
        # Map the intermediate Devanagari back to Sinhala.
        return sdt.devanagari_to_sinhala(result)
    return result
|
def rename_stream_kwargs(stream, kwargs, reverse=False):
    """Map a kwargs dictionary of parameter values onto renamed keys.

    Given a stream and a kwargs dictionary of parameter values, return the
    corresponding dictionary whose keys have been substituted with the
    appropriately renamed strings.  With ``reverse=True`` the output uses
    the original parameter names given a dictionary keyed by the renamed
    equivalents.
    """
    mapping = stream_name_mapping(stream, reverse=reverse)
    renamed = {}
    for key, value in kwargs.items():
        if key not in mapping:
            direction = 'from' if reverse else 'to'
            raise KeyError('Could not map key {key} {direction} renamed equivalent'.format(key=repr(key), direction=direction))
        renamed[mapping[key]] = value
    return renamed
|
def fingerprint2keyid(fingerprint):
    """Return the keyid of the private key matching *fingerprint*.

    :param fingerprint: GPG key fingerprint to look up.
    :return: the matching keyid, or ``None`` when the gnupg module is
        unavailable or no private key matches.
    """
    if gnupg is None:
        return None
    gpg = gnupg.GPG()
    # BUGFIX: the original compared against the global
    # config["gpg_key_fingerprint"], silently ignoring the `fingerprint`
    # argument -- contradicting both the name and the docstring.
    for private_key in gpg.list_keys(True):  # True -> list *private* keys
        if private_key['fingerprint'] == fingerprint:
            return private_key['keyid']
    return None
|
def read_from_memory_region(self, *, region_name: str):
    """Reads from a memory region named region_name on the QAM.

    This is a shim over the eventual API and only can return memory from a
    region named "ro" of type ``BIT``.

    :param region_name: The string naming the declared memory region.
    :return: A list of values of the appropriate type.
    """
    warnings.warn(
        "pyquil.api._qam.QAM.read_from_memory_region is deprecated, please use "
        "pyquil.api._qam.QAM.read_memory instead.",
        DeprecationWarning,
    )
    # The run must already have completed before results can be read.
    assert self.status == 'done'
    if region_name != "ro":
        raise QAMError("Currently only allowed to read measurement data from ro.")
    if self._bitstrings is None:
        raise QAMError("Bitstrings have not yet been populated. Something has gone wrong.")
    return self._bitstrings
|
def check_house(self, complex: str, house: str) -> bool:
    """Check if the given house exists in the rumetr database."""
    self.check_complex(complex)
    cache_key = '%s__%s' % (complex, house)
    # Skip the remote round-trip if we already verified this house.
    if cache_key in self._checked_houses:
        return True
    try:
        self.get('developers/{developer}/complexes/{complex}/houses/{house}/'.format(developer=self.developer, complex=complex, house=house,))
    except exceptions.Rumetr404Exception:
        raise exceptions.RumetrHouseNotFound('Unknown house (complex is known) — may be you should create one?')
    self._checked_houses.add(cache_key)
    return True
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'language' ) and self . language is not None :
_dict [ 'language' ] = self . language
if hasattr ( self , 'name' ) and self . name is not None :
_dict [ 'name' ] = self . name
return _dict
|
def transient_change_detect(self, *class_build_args, **class_build_kwargs):
    """Detect a change in the status of the system regarding the transient list.

    Also applies changes due to regex_set updates if needed.
    _transient_change_detect -> _transient_change_diff -> _update_transients
    """
    currently_available = set(self.get_transients_available())
    # TODO: unify that last_got set with the *_available. they are essentially the same
    vanished = self.last_transients_detected - currently_available
    # Start interfacing with new matches, and update old matches that
    # match a regex now.
    diff = self.transient_change_diff(
        *class_build_args,
        transient_appeared=currently_available,
        transient_gone=vanished,
        **class_build_kwargs,
    )
    self.last_transients_detected.update(currently_available)
    if vanished:
        self.last_transients_detected.difference_update(vanished)
    return diff
|
def _get_update_fn(strategy):
    """Select a dict-update callable for the given merge strategy.

    :param strategy: a key from MERGE_STRATEGIES, a callable, or ``None``
        (which defaults to ``MS_DICTS``).
    :return: Callable to update objects
    :raises ValueError: when the strategy is neither known nor callable.
    """
    chosen = MS_DICTS if strategy is None else strategy
    try:
        return _MERGE_FNS[chosen]
    except KeyError:
        # Unknown key: accept a user-supplied callable, otherwise reject.
        if callable(chosen):
            return chosen
        raise ValueError("Wrong merge strategy: %r" % chosen)
|
def get_merge_bases(self, repository_name_or_id, commit_id, other_commit_id, project=None, other_collection_id=None, other_repository_id=None):
    """GetMergeBases.

    [Preview API] Find the merge bases of two commits, optionally across
    forks.  If otherRepositoryId is not specified, the merge bases will only
    be calculated within the context of the local repositoryNameOrId.

    :param str repository_name_or_id: ID or name of the local repository.
    :param str commit_id: First commit, usually the tip of the target branch of the potential merge.
    :param str other_commit_id: Other commit, usually the tip of the source branch of the potential merge.
    :param str project: Project ID or project name
    :param str other_collection_id: The collection ID where otherCommitId lives.
    :param str other_repository_id: The repository ID where otherCommitId lives.
    :rtype: [GitCommitRef]
    """
    serialize = self._serialize
    # URL path parameters.
    path_params = {}
    if project is not None:
        path_params['project'] = serialize.url('project', project, 'str')
    if repository_name_or_id is not None:
        path_params['repositoryNameOrId'] = serialize.url('repository_name_or_id', repository_name_or_id, 'str')
    if commit_id is not None:
        path_params['commitId'] = serialize.url('commit_id', commit_id, 'str')
    # Query-string parameters.
    query_params = {}
    if other_commit_id is not None:
        query_params['otherCommitId'] = serialize.query('other_commit_id', other_commit_id, 'str')
    if other_collection_id is not None:
        query_params['otherCollectionId'] = serialize.query('other_collection_id', other_collection_id, 'str')
    if other_repository_id is not None:
        query_params['otherRepositoryId'] = serialize.query('other_repository_id', other_repository_id, 'str')
    response = self._send(
        http_method='GET',
        location_id='7cf2abb6-c964-4f7e-9872-f78c66e72e9c',
        version='5.0-preview.1',
        route_values=path_params,
        query_parameters=query_params,
    )
    return self._deserialize('[GitCommitRef]', self._unwrap_collection(response))
|
def render_image(self, rgbobj, dst_x, dst_y):
    """Render the image represented by (rgbobj) at dst_x, dst_y in the offscreen pixmap."""
    self.logger.debug("redraw pixmap=%s" % (self.pixmap))
    if self.pixmap is None:
        # No offscreen surface to draw into yet.
        return
    self.logger.debug("drawing to pixmap")
    # Prepare the pixel array in this widget's channel order.
    arr = rgbobj.get_array(self.rgb_order, dtype=np.uint8)
    height, width = arr.shape[:2]
    return self._render_offscreen(self.pixmap, arr, dst_x, dst_y, width, height)
|
def acquire(self, blocking=True):
    """Use Redis to hold a shared, distributed lock named ``name``.

    Returns True once the lock is acquired.
    If ``blocking`` is False, always return immediately. If the lock
    was acquired, return True, otherwise return False.
    """
    sleep = self.sleep
    timeout = self.timeout
    while 1:
        unixtime = time.time()
        # The value stored under the lock key is the absolute expiry time;
        # a falsy timeout means "hold forever".
        if timeout:
            timeout_at = unixtime + timeout
        else:
            timeout_at = Lock.LOCK_FOREVER
        timeout_at = float(timeout_at)
        # SETNX only succeeds when nobody holds the lock.
        if self.redis.setnx(self.name, timeout_at):
            self.acquired_until = timeout_at
            return True
        # We want blocking, but didn't acquire the lock
        # check to see if the current lock is expired
        # ("or 1" treats a vanished/empty value as expired long ago; a
        # non-numeric value is treated as never expiring).
        try:
            existing = float(self.redis.get(self.name) or 1)
        except ValueError:
            existing = Lock.LOCK_FOREVER
        if existing < unixtime:
            # the previous lock is expired, attempt to overwrite it.
            # GETSET is atomic: whoever reads the *old* (expired) value
            # is the one that won the overwrite race.
            try:
                existing = float(self.redis.getset(self.name, timeout_at) or 1)
            except ValueError:
                existing = Lock.LOCK_FOREVER
            if existing < unixtime:
                # we successfully acquired the lock
                self.acquired_until = timeout_at
                return True
        if not blocking:
            return False
        time.sleep(sleep)
|
def get_meta_rdf(self, fmt='n3'):
    """Get the metadata for this Thing in RDF format.

    For advanced users who want to manipulate the RDF for this Thing directly
    without the ThingMeta helper object.

    Returns the RDF in the format you specify. - OR -
    Raises IOTException containing the error if the infrastructure detects a
    problem.
    Raises LinkException if there is a communications problem between you and
    the infrastructure.

    `fmt` (optional) (string) The format of RDF you want returned.
    Valid formats are: "xml", "n3", "turtle"
    """
    request_evt = self._client._request_entity_meta_get(self.__lid, fmt=fmt)
    # Block until the request completes; raises on infrastructure failure.
    self._client._wait_and_except_if_failed(request_evt)
    return request_evt.payload['meta']
|
def tracksplot(adata, var_names, groupby, use_raw=None, log=False, dendrogram=False, gene_symbols=None, var_group_positions=None, var_group_labels=None, layer=None, show=None, save=None, figsize=None, **kwds):
    """In this type of plot each var_name is plotted as a filled line plot where the
    y values correspond to the var_name values and x is each of the cells. Best results
    are obtained when using raw counts that are not log.

    `groupby` is required to sort and order the values using the respective group
    and should be a categorical value.

    Parameters
    ----------
    {common_plot_args}
    {show_save_ax}
    **kwds : keyword arguments
        Are passed to `seaborn.heatmap`.

    Returns
    -------
    A list of :class:`~matplotlib.axes.Axes`.

    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.pl.tracksplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
    ...                  'bulk_labels', dendrogram=True)
    """
    # groupby must name an existing *categorical* .obs column; anything else
    # is a hard error listing the valid alternatives.
    if groupby not in adata.obs_keys() or adata.obs[groupby].dtype.name != 'category':
        raise ValueError('groupby has to be a valid categorical observation. Given value: {}, '
                         'valid categorical observations: {}'.format(groupby, [x for x in adata.obs_keys() if adata.obs[x].dtype.name == 'category']))
    # obs_tidy: dataframe of cells x var_names, indexed by the groupby category.
    categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, None, gene_symbols=gene_symbols, layer=layer)
    # get categories colors:
    if groupby + "_colors" not in adata.uns:
        # no palette stored yet for this categorical obs: assign the default one
        from ._tools.scatterplots import _set_default_colors_for_categorical_obs
        _set_default_colors_for_categorical_obs(adata, groupby)
    groupby_colors = adata.uns[groupby + "_colors"]
    if dendrogram:
        # compute dendrogram if needed and reorder
        # rows and columns to match leaves order.
        dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram, var_names=var_names, var_group_labels=var_group_labels, var_group_positions=var_group_positions)
        # reorder obs_tidy
        if dendro_data['var_names_idx_ordered'] is not None:
            obs_tidy = obs_tidy.iloc[:, dendro_data['var_names_idx_ordered']]
            var_names = [var_names[x] for x in dendro_data['var_names_idx_ordered']]
        # reorder the categorical index (and the matching colors) to the
        # dendrogram leaf order
        obs_tidy.index = obs_tidy.index.reorder_categories([categories[x] for x in dendro_data['categories_idx_ordered']], ordered=True)
        categories = [categories[x] for x in dendro_data['categories_idx_ordered']]
        groupby_colors = [groupby_colors[x] for x in dendro_data['categories_idx_ordered']]
    # group cells of the same category together
    obs_tidy = obs_tidy.sort_index()
    # reduce/smooth the data to roughly this many plotted points per track
    goal_points = 1000
    obs_tidy = _reduce_and_smooth(obs_tidy, goal_points)
    # obtain the start and end of each category and make
    # a list of ranges that will be used to plot a different
    # color
    cumsum = [0] + list(np.cumsum(obs_tidy.index.value_counts(sort=False)))
    x_values = [(x, y) for x, y in zip(cumsum[:-1], cumsum[1:])]
    dendro_height = 1 if dendrogram else 0
    groupby_height = 0.24
    num_rows = len(var_names) + 2
    # + 1 because of dendrogram on top and categories at bottom
    if figsize is None:
        width = 12
        track_height = 0.25
    else:
        width, height = figsize
        track_height = (height - (dendro_height + groupby_height)) / len(var_names)
    height_ratios = [dendro_height] + [track_height] * len(var_names) + [groupby_height]
    height = sum(height_ratios)
    # transpose: rows are now var_names, columns are cells
    obs_tidy = obs_tidy.T
    fig = pl.figure(figsize=(width, height))
    # second (narrow) column holds the optional var-group color bar
    axs = gridspec.GridSpec(ncols=2, nrows=num_rows, wspace=1.0 / width, hspace=0, height_ratios=height_ratios, width_ratios=[width, 0.14])
    axs_list = []
    first_ax = None
    for idx, var in enumerate(var_names):
        ax_idx = idx + 1
        # this is because of the dendrogram
        if first_ax is None:
            ax = fig.add_subplot(axs[ax_idx, 0])
            first_ax = ax
        else:
            ax = fig.add_subplot(axs[ax_idx, 0], sharex=first_ax)
        axs_list.append(ax)
        # one filled area per category, in the category's color
        for cat_idx, category in enumerate(categories):
            x_start, x_end = x_values[cat_idx]
            ax.fill_between(range(x_start, x_end), 0, obs_tidy.iloc[idx, x_start:x_end], lw=0.1, color=groupby_colors[cat_idx])
        # remove the xticks labels except for the last processed plot.
        # Because the plots share the x axis it is redundant and less compact to plot the
        # axis for each plot
        if idx < len(var_names) - 1:
            ax.tick_params(labelbottom=False, labeltop=False, bottom=False, top=False)
            ax.set_xlabel('')
        if log:
            ax.set_yscale('log')
        ax.spines['left'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        ax.grid(False)
        # show only the track's (integer) max value as its single y tick
        ymin, ymax = ax.get_ylim()
        ymax = int(ymax)
        ax.set_yticks([ymax])
        tt = ax.set_yticklabels([str(ymax)], ha='left', va='top')
        ax.spines['right'].set_position(('axes', 1.01))
        ax.tick_params(axis='y', labelsize='x-small', right=True, left=False, length=2, which='both', labelright=True, labelleft=False, direction='in')
        ax.set_ylabel(var, rotation=0, fontsize='small', ha='right', va='bottom')
        ax.yaxis.set_label_coords(-0.005, 0.1)
    # applied to the last track axis; all axes share x
    ax.set_xlim(0, x_end)
    ax.tick_params(axis='x', bottom=False, labelbottom=False)
    # the ax to plot the groupby categories is split to add a small space
    # between the rest of the plot and the categories
    axs2 = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec=axs[num_rows - 1, 0], height_ratios=[1, 1])
    groupby_ax = fig.add_subplot(axs2[1])
    ticks, labels, groupby_cmap, norm = _plot_categories_as_colorblocks(groupby_ax, obs_tidy.T, colors=groupby_colors, orientation='bottom')
    # add lines to plot
    overlay_ax = fig.add_subplot(axs[1:-1, 0], sharex=first_ax)
    line_positions = np.cumsum(obs_tidy.T.index.value_counts(sort=False))[:-1]
    overlay_ax.vlines(line_positions, 0, 1, lw=0.5, linestyle="--")
    overlay_ax.axis('off')
    overlay_ax.set_ylim(0, 1)
    if dendrogram:
        dendro_ax = fig.add_subplot(axs[0], sharex=first_ax)
        _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, orientation='top', ticks=ticks)
        axs_list.append(dendro_ax)
    if var_group_positions is not None and len(var_group_positions) > 0:
        # color bar marking the var groups, in the narrow right-hand column
        gene_groups_ax = fig.add_subplot(axs[1:-1, 1])
        arr = []
        for idx, pos in enumerate(var_group_positions):
            arr += [idx] * (pos[1] + 1 - pos[0])
        gene_groups_ax.imshow(np.matrix(arr).T, aspect='auto', cmap=groupby_cmap, norm=norm)
        gene_groups_ax.axis('off')
        axs_list.append(gene_groups_ax)
    utils.savefig_or_show('tracksplot', show=show, save=save)
    return axs_list
|
def AddEventHandler(self, classId=None, managedObject=None, prop=None, successValue=[], failureValue=[], transientValue=[], pollSec=None, timeoutSec=None, callBack=None):
    """Adds an event handler.

    An event handler can be added using this method where an user can subscribe
    for the event channel from UCS and can monitor those events for any
    specific success value or failure value for a managed object.

    - classId specifies the class name for which events should be monitored.
    - managedObject specifies a particular managed object that user wants to
      monitor. prop specifies the property of the managed object which will be
      monitored. successValue, failureValue, transientValue specify the
      respective scenarios (success, failure etc) about the prop of the
      managed object.
    - pollSec specifies the time in seconds for polling event.
    - timeoutSec specifies the time after which method should stop polling or
      timeOut.
    - callBack specifies the call back method or operation that can be given
      to this method.
    """
    # NOTE(review): the [] defaults for successValue/failureValue/transientValue
    # are shared mutable defaults -- safe only if never mutated; confirm.
    from UcsBase import WriteObject, _GenericMO, UcsUtils, WriteUcsWarning, UcsValidationException
    if (self._transactionInProgress):
        raise UcsValidationException("UCS transaction in progress. Cannot execute WatchUcs. Complete or Undo UCS transaction.")
        # WriteUcsWarning("UCS transaction in progress. Cannot execute WatchUcs. Complete or Undo UCS transaction.")
        # return False
    # Exactly one of classId / managedObject may be given (or neither).
    if (classId != None and managedObject == None):
        if (UcsUtils.FindClassIdInMoMetaIgnoreCase(classId) == None):
            raise UcsValidationException("Invalid ClassId %s specified." % (classId))
            # raise Exception("Invalid ClassId %s specified." % (classId))
    elif (managedObject != None and classId == None):
        # managedObject mode additionally requires a known prop and a
        # non-empty successValue.
        if (UcsUtils.FindClassIdInMoMetaIgnoreCase(managedObject.getattr("classId")) == None):
            raise UcsValidationException("Object of unknown ClassId %s provided." % (managedObject.getattr("classId")))
            # raise Exception("Object of unknown ClassId %s provided." % (managedObject.getattr("classId")))
        if prop == None:
            raise UcsValidationException("prop parameter is not provided.")
            # raise Exception("prop parameter is not provided.")
        propMeta = UcsUtils.GetUcsPropertyMeta(managedObject.getattr("classId"), UcsUtils.WordU(prop))
        if propMeta == None:
            raise UcsValidationException("Unknown Property %s provided." % (prop))
            # raise Exception("Unknown Property %s provided." % (prop))
        if not successValue:
            raise UcsValidationException("successValue parameter is not provided.")
            # raise Exception("successValue parameter is not provided.")
    elif (classId != None and managedObject != None):
        raise UcsValidationException("You cannot provide both classId and mandgedObject")
        # raise Exception("You cannot provide both classId and mandgedObject")
    wb = None
    # All handler parameters are carried in one dict attached to the watch block.
    paramDict = {'classId': classId, 'managedObject': managedObject, 'prop': prop, 'successValue': successValue, 'failureValue': failureValue, 'transientValue': transientValue, 'pollSec': pollSec, 'timeoutSec': timeoutSec, 'callBack': callBack, 'startTime': datetime.datetime.now()}
    # Choose the event filter closure based on what was provided.
    if (classId == None and managedObject == None):
        # No restriction: watch every event.
        def WatchUcsAllFilter(mce):
            return True
        wb = self._add_watch_block(params=paramDict, filterCb=WatchUcsAllFilter, cb=callBack)
    elif (classId != None and managedObject == None):
        # Watch events whose MO class matches classId (case-insensitive).
        def WatchUcsTypeFilter(mce):
            if ((mce.mo.getattr("classId")).lower() == classId.lower()):
                return True
            return False
        wb = self._add_watch_block(params=paramDict, filterCb=WatchUcsTypeFilter, cb=callBack)
    elif (classId == None and managedObject != None):
        if (pollSec == None):
            # Event-driven: watch events for this exact MO (matched by Dn).
            def WatchUcsMoFilter(mce):
                if (mce.mo.getattr("Dn") == managedObject.getattr("Dn")):
                    return True
                return False
            wb = self._add_watch_block(params=paramDict, filterCb=WatchUcsMoFilter, cb=callBack)
        else:
            # Polling mode: the filter rejects everything; the poll loop
            # queries the MO state instead.
            def WatchUcsNoneFilter(mce):
                return False
            wb = self._add_watch_block(params=paramDict, filterCb=WatchUcsNoneFilter, cb=callBack)
    # Allow Ctrl-C to stop the watch block.
    signal.signal(signal.SIGINT, self._stop_wb_callback)
    if (wb == None):
        raise UcsValidationException("Error adding WatchBlock...")
        # raise Exception("Error adding WatchBlock...")
    # First watch block registered: start the dequeue thread.
    if wb != None and len(self._wbs) == 1:
        self._start_dequeue_thread()
    return wb
|
def colinear_pairs(segments, radius=.01, angle=.01, length=None):
    """Find pairs of segments which are colinear.

    Parameters
    ----------
    segments : (n, 2, (2, 3)) float
      Two or three dimensional line segments
    radius : float
      Maximum radius line origins can differ
      and be considered colinear
    angle : float
      Maximum angle in radians segments can
      differ and still be considered colinear
    length : None or float
      If specified, will additionally require
      that pairs have a mean vertex distance less
      than this value from each other to qualify.

    Returns
    -------
    pairs : (m, 2) int
      Indexes of segments which are colinear
    """
    from scipy import spatial
    # convert segments to parameterized origins
    # which are the closest point on the line to
    # the actual zero-origin
    origins, vectors, param = segments_to_parameters(segments)
    # create a kdtree for origins
    tree = spatial.cKDTree(origins)
    # find origins closer than specified radius
    pairs = tree.query_pairs(r=radius, output_type='ndarray')
    # calculate angles between pairs
    angles = geometry.vector_angle(vectors[pairs])
    # angles can be within tolerance of 180 degrees or 0.0 degrees
    angle_ok = np.logical_or(util.isclose(angles, np.pi, atol=angle), util.isclose(angles, 0.0, atol=angle))
    # apply angle threshold
    colinear = pairs[angle_ok]
    # if length is specified check endpoint proximity
    if length is not None:
        # make sure parameter pairs are ordered
        param.sort(axis=1)
        # calculate the mean parameter distance for each colinear pair
        # BUGFIX: use the np.ptp function; the ndarray.ptp() method was
        # removed in NumPy 2.0
        distance = np.ptp(param[colinear], axis=1).mean(axis=1)
        # if the MEAN distance is less than specified length consider
        # the segment to be identical: worst case single-vertex
        # distance is 2 * length
        identical = distance < length
        # remove non-identical pairs
        colinear = colinear[identical]
    return colinear
|
def ensure_data():
    '''Ensure that the Garuda directory and its ``__init__.py`` marker file exist.'''
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair (another process creating the
    # directory in between raised FileExistsError).
    os.makedirs(GARUDA_DIR, exist_ok=True)
    Path(f'{GARUDA_DIR}/__init__.py').touch()
|
def is_extracted(self, file_path):
    """Check if the data file is already extracted."""
    extracted = os.path.isdir(file_path)
    if extracted:
        self.chatbot.logger.info('File is already extracted')
    return extracted
|
def serialize_ndarray_b64(o):
    """Serializes a :obj:`numpy.ndarray` in a format where the datatype and
    shape are human-readable, but the array data itself is base64 encoded.

    Args:
        o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

    Returns:
        A dictionary that can be passed to :obj:`json.dumps`.
    """
    # b64encode needs a contiguous buffer; copy only when necessary.
    contiguous = o if o.flags['C_CONTIGUOUS'] else np.ascontiguousarray(o)
    encoded = base64.b64encode(contiguous.data)
    return dict(_type='np.ndarray', data=encoded.decode('utf-8'), dtype=o.dtype, shape=o.shape)
|
def make_message(self, data):
    """Create a Message instance from data.

    ``data`` will be loaded via munge according to the codec specified in
    the transport_content_type attribute.

    Returns:
        Message: message object
    """
    data = self.codec.loads(data)
    msg = Message(data.get("data"), *data.get("args", []), **data.get("kwargs", {}))
    # BUGFIX: a payload without a "meta" key (or with meta=None) used to
    # crash with TypeError from dict.update(None); treat it as empty.
    msg.meta.update(data.get("meta") or {})
    self.trigger("make_message", data, msg)
    return msg
|
def send_music(self, user_id, url, hq_url, thumb_media_id,
               title=None, description=None, account=None):
    """Send a music message through the customer-service API.

    See http://mp.weixin.qq.com/wiki/7/12a5a320ae96fecdf0e15cb06123de9f.html

    :param user_id: target user id (the ``source`` of a received ``Message``)
    :param url: music URL
    :param hq_url: high-quality music URL, preferred on wifi
    :param thumb_media_id: thumbnail media id (upload via :func:`upload_media`)
    :param title: optional music title
    :param description: optional music description
    :param account: optional customer-service account
    :return: decoded JSON response
    """
    music = {
        'musicurl': url,
        'hqmusicurl': hq_url,
        'thumb_media_id': thumb_media_id,
    }
    # Optional fields are only included when truthy, matching the API.
    for field, value in (('title', title), ('description', description)):
        if value:
            music[field] = value
    payload = {'touser': user_id, 'msgtype': 'music', 'music': music}
    return self._send_custom_message(payload, account=account)
|
def save(self, filename, addnormalised=False):
    """Save a frequency list to file; it can be loaded later using the
    load method.

    :param filename: path of the output file (written as UTF-8).
    :param addnormalised: forwarded to :meth:`output`; include a
        normalised-frequency column.
    """
    # 'with' guarantees the file is closed even if self.output() raises,
    # fixing the handle leak of the previous open()/close() pair.
    with io.open(filename, 'w', encoding='utf-8') as f:
        for line in self.output("\t", addnormalised):
            f.write(line + '\n')
|
def loadProfile(self, profile, inspectorFullName=None):
    """Reads the persistent program settings for the current profile.

    If inspectorFullName is given, a window with this inspector will be
    created if it wasn't already created in the profile. All windows with
    this inspector will be raised.

    :param profile: name of the settings profile to read.
    :param inspectorFullName: optional inspector identifier to ensure a
        window exists for.
    """
    settings = QtCore.QSettings()
    logger.info("Reading profile {!r} from: {}".format(profile, settings.fileName()))
    # Remember which profile is active so later operations target the same
    # settings group.
    self._profile = profile
    profGroupName = self.profileGroupName(profile)
    # Instantiate windows from groups
    settings.beginGroup(profGroupName)
    try:
        for windowGroupName in settings.childGroups():
            # Only child groups named 'window...' describe main windows.
            if windowGroupName.startswith('window'):
                settings.beginGroup(windowGroupName)
                try:
                    self.addNewMainWindow(settings=settings)
                finally:
                    # Always pop the group, even if window creation fails.
                    settings.endGroup()
    finally:
        settings.endGroup()
    if inspectorFullName is not None:
        windows = [win for win in self._mainWindows
                   if win.inspectorFullName == inspectorFullName]
        if len(windows) == 0:
            logger.info("Creating window for inspector: {!r}".format(inspectorFullName))
            try:
                win = self.addNewMainWindow(inspectorFullName=inspectorFullName)
            except KeyError:
                # Unknown inspector id: log and continue without a window.
                logger.warn("No inspector found with ID: {}".format(inspectorFullName))
        else:
            # Bring every window showing this inspector to the front.
            for win in windows:
                win.raise_()
    if len(self.mainWindows) == 0:
        # Fall back to a default window so the application never starts empty.
        logger.info("No open windows in profile (creating one).")
        self.addNewMainWindow(inspectorFullName=DEFAULT_INSPECTOR)
|
def download(self, attachment_id, destination):
    """Download an attachment from Zendesk.

    :param attachment_id: id of the attachment to download
    :param destination: destination path. If a directory, the file will be
        placed in the directory with the filename from the Attachment object.
    :return: the path the file was written to
    """
    attachment = self(id=attachment_id)
    target = destination
    if os.path.isdir(target):
        # Directory given: append the attachment's own file name.
        target = os.path.join(target, attachment.file_name)
    return self._download_file(attachment.content_url, target)
|
def save(self, checkpoint_dir=None):
    """Saves the current model state to a checkpoint.

    Subclasses should override ``_save()`` instead to save state.
    This method dumps additional metadata alongside the saved path.

    Args:
        checkpoint_dir (str): Optional dir to place the checkpoint.

    Returns:
        Checkpoint path that may be passed to restore().
    """
    # Each iteration gets its own sub-directory so successive checkpoints
    # never clobber each other.
    checkpoint_dir = os.path.join(checkpoint_dir or self.logdir,
                                  "checkpoint_{}".format(self._iteration))
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # `_save` may return either a path (str) or a picklable dict of state.
    checkpoint = self._save(checkpoint_dir)
    saved_as_dict = False
    if isinstance(checkpoint, string_types):
        # A returned path must live inside checkpoint_dir and must not be
        # the directory itself.
        if (not checkpoint.startswith(checkpoint_dir)
                or checkpoint == checkpoint_dir):
            raise ValueError(
                "The returned checkpoint path must be within the "
                "given checkpoint dir {}: {}".format(checkpoint_dir, checkpoint))
        if not os.path.exists(checkpoint):
            raise ValueError(
                "The returned checkpoint path does not exist: {}".format(checkpoint))
        checkpoint_path = checkpoint
    elif isinstance(checkpoint, dict):
        # Dict state is pickled to a fixed filename inside the directory.
        saved_as_dict = True
        checkpoint_path = os.path.join(checkpoint_dir, "checkpoint")
        with open(checkpoint_path, "wb") as f:
            pickle.dump(checkpoint, f)
    else:
        raise ValueError(
            "`_save` must return a dict or string type: {}".format(str(type(checkpoint))))
    # Sidecar metadata file lets restore() recover the trainer counters.
    with open(checkpoint_path + ".tune_metadata", "wb") as f:
        pickle.dump({
            "experiment_id": self._experiment_id,
            "iteration": self._iteration,
            "timesteps_total": self._timesteps_total,
            "time_total": self._time_total,
            "episodes_total": self._episodes_total,
            "saved_as_dict": saved_as_dict
        }, f)
    return checkpoint_path
|
def format_stack_frame(stack_frame, project: 'projects.Project') -> dict:
    """Formats a raw stack frame into a dictionary formatted for render
    templating and enriched with information from the currently open project.

    :param stack_frame:
        A raw stack frame to turn into an enriched version for templating.
    :param project:
        The currently open project, used to contextualize stack information
        with project-specific data.
    :return:
        A dictionary containing the enriched stack frame data.
    """
    path = stack_frame.filename
    prefix = project.source_directory
    if path.startswith(prefix):
        # Project files are shown relative to the source directory
        # (the +1 strips the path separator as well).
        path = path[len(prefix) + 1:]
    # '<module>' denotes module-level code; render it as "no location".
    location = None if stack_frame.name == '<module>' else stack_frame.name
    return dict(
        filename=path,
        location=location,
        line_number=stack_frame.lineno,
        line=stack_frame.line,
    )
|
def get_accounts(self, fetch=False):
    """Return this Wallet's accounts object.

    :param fetch: when True, populate the collection immediately.
    """
    return Accounts(
        self.resource.accounts, self.client, wallet=self, populate=fetch)
|
def get_file_suffix(self, path, prefix):
    """Return a zero-padded, four-digit suffix for the next file name.

    The suffix is the count of files already present in the Drive folder at
    ``path`` whose titles contain ``prefix``.

    :param path: folder path (resolved via ``_ensure_folder_path``).
    :param prefix: title substring to count existing files by.
    :return: four-digit, zero-padded count string, e.g. ``'0003'``.
    """
    parent = self._ensure_folder_path(path)
    file_list = self.drive.ListFile(
        {'q': "'{}' in parents and trashed=false and title contains '{}'".format(parent['id'], prefix)}
    ).GetList()
    try:
        number_of_files = len(file_list)
    except TypeError:
        # Narrowed from a bare except: only a non-sized GetList() result
        # should fall back to zero; real errors now propagate.
        number_of_files = 0
    return '{0:04}'.format(number_of_files)
|
def iter_comments(self, number=-1, etag=None):
    """Iterate over the comments on this pull request.

    :param int number: (optional), number of comments to return. Default:
        -1 returns all available comments.
    :param str etag: (optional), ETag from a previous request to the same
        endpoint
    :returns: generator of :class:`ReviewComment <ReviewComment>`\\ s
    """
    comments_url = self._build_url('comments', base_url=self._api)
    count = int(number)
    return self._iter(count, comments_url, ReviewComment, etag=etag)
|
def extract_name_value_type(self, name, value, limit_size=False):
    """Extracts value of any object, eventually reduces its size and
    returns name, truncated value and type (for str with size appended).

    :param name: variable name (converted via repr() when not a str).
    :param value: object whose repr() is extracted.
    :param limit_size: accepted for API compatibility; truncation above
        MAX_STRING_LEN_TO_RETURN is always applied.
    :return: tuple ``(r_name, r_value, r_type)``.
    """
    MAX_STRING_LEN_TO_RETURN = 487
    try:
        t_value = repr(value)
    except Exception:
        # repr() of arbitrary user objects can raise anything; never let the
        # debugger crash because of it. Narrowed from a bare except so
        # KeyboardInterrupt/SystemExit still propagate.
        t_value = "Error while extracting value"
    # convert all var names to string
    if isinstance(name, str):
        r_name = name
    else:
        r_name = repr(name)
    # truncate value to limit data flow between ikpdb and client
    if len(t_value) > MAX_STRING_LEN_TO_RETURN:
        r_value = "%s ... (truncated by ikpdb)" % (t_value[:MAX_STRING_LEN_TO_RETURN],)
        # add a visual marker to truncated var's name
        r_name = "%s*" % r_name
    else:
        r_value = t_value
    if isinstance(value, str):
        # For strings, append the full (untruncated) length to the type.
        r_type = "%s [%s]" % (IKPdbRepr(value), len(value),)
    else:
        r_type = IKPdbRepr(value)
    return r_name, r_value, r_type
|
def vline(self, x: int, y: int, height: int,
          bg_blend: int = tcod.constants.BKGND_DEFAULT,) -> None:
    """Draw a vertical line on the console.

    This always uses ord('│'), the vertical line character.

    Args:
        x (int): The x coordinate from the left.
        y (int): The y coordinate from the top.
        height (int): The vertical length of this line.
        bg_blend (int): The background blending flag.

    .. deprecated:: 8.5
        Console methods which depend on console defaults have been
        deprecated.
        Use :any:`Console.draw_rect` instead, calling this function will
        print a warning detailing which default values need to be made
        explicit.
    """
    # Emit the deprecation warning pointing users at draw_rect.
    self.__deprecate_defaults("draw_rect", bg_blend)
    # Delegate the actual drawing to the C library.
    lib.TCOD_console_vline(self.console_c, x, y, height, bg_blend)
|
def get(args):
    """Retrieve records.

    Argument:
        args: arguments object
    """
    password = get_password(args)
    token = connect.get_token(args.username, password, args.server)
    opts = args.__dict__
    # '--search' option: fall back to an empty keyword when absent.
    keyword = opts.get('search') or ''
    raw_flag = bool(opts.get('raw_flag'))
    domain = opts.get('domain')
    if domain:
        # A specific zone was requested: return its data.
        return processing.get_zone(args.server, token, domain, keyword, raw_flag)
    # No zone given: process all zones (no return value).
    processing.get_all_zone(args.server, token)
|
def stack_clip(s_orig, extent, out_stack_fn=None, copy=True, save=False):
    """Create a new stack object with limited extent from an existing stack object.

    :param s_orig: source DEMStack-like object (must expose ``ma_stack``,
        ``gt``, ``extent``, ``stack_fn``).
    :param extent: map-coordinate extent ``(xmin, ymin, xmax, ymax)``.
    :param out_stack_fn: optional output filename for the clipped stack.
    :param copy: work on a deep copy so the original object is untouched.
    :param save: write the clipped stack to disk (also forces a copy).
    :return: new stack object limited to ``extent``.
    """
    # Should check for valid extent
    # This is not memory efficient, but is much simpler
    # To be safe, if we are saving out, create a copy to avoid overwriting
    if copy or save:
        from copy import deepcopy
        print("Copying original DEMStack")
        s = deepcopy(s_orig)
    else:
        # Want to be very careful here, as we could overwrite the original file
        s = s_orig
    from pygeotools.lib import geolib
    gt = s.gt
    # (rows, cols) footprint of the 3-D masked stack (band, y, x).
    s_shape = s.ma_stack.shape[1:3]
    # Compute pixel bounds for input extent
    min_x_px, max_y_px = geolib.mapToPixel(extent[0], extent[1], gt)
    max_x_px, min_y_px = geolib.mapToPixel(extent[2], extent[3], gt)
    # Clip to stack extent and round to whole integers
    min_x_px = int(max(0, min_x_px) + 0.5)
    max_x_px = int(min(s_shape[1], max_x_px) + 0.5)
    min_y_px = int(max(0, min_y_px) + 0.5)
    max_y_px = int(min(s_shape[0], max_y_px) + 0.5)
    # Clip the stack
    x_slice = slice(min_x_px, max_x_px)
    y_slice = slice(min_y_px, max_y_px)
    s.ma_stack = s.ma_stack[:, y_slice, x_slice]
    # Now update geospatial info
    # This returns the pixel center in map coordinates
    # Want to remove 0.5 px offset for upper left corner in gt
    out_ul = geolib.pixelToMap(min_x_px - 0.5, min_y_px - 0.5, gt)
    # Update stack geotransform
    s.gt[0] = out_ul[0]
    s.gt[3] = out_ul[1]
    # Update new stack extent
    s.get_extent()
    # Check for and discard empty arrays
    # Might be faster to reshape then np.ma.count(s.ma_stack, axis=1)
    count_list = np.array([i.count() for i in s.ma_stack])
    idx = count_list > 0
    # Output subset with valid data in new extent
    # fn_list, source, error, error_dict_list, date_list, date_list_o
    # Note: no need to copy again
    s_sub = get_stack_subset(s, idx, out_stack_fn=out_stack_fn, copy=False, save=False)
    print("Orig filename:", s_orig.stack_fn)
    print("Orig extent:", s_orig.extent)
    print("Orig dimensions:", s_orig.ma_stack.shape)
    print("Input extent:", extent)
    print("New filename:", s_sub.stack_fn)
    print("New extent:", s_sub.extent)
    print("New dimensions:", s_sub.ma_stack.shape)
    if save:
        # Refuse to overwrite the source stack file.
        if os.path.abspath(s_orig.stack_fn) == os.path.abspath(s_sub.stack_fn):
            print("Original stack would be overwritten!")
            print("Skipping save")
        else:
            s_sub.save = True
            s_sub.savestack()
    # The following should be unchanged by clip - it is more efficient to clip these, but easier to regenerate
    # if s.stats:
    #     stack_count, stack_mean, stack_min, stack_max, stack_std
    #     s.stack_min = s.stack_min[y_slice, x_slice]
    # if s.datestack:
    #     dt_ptp, dt_min, dt_max, dt_center
    # if s.med:
    #     stack_med
    # if s.trend:
    #     trend, intercept, detrended_std
    # Recompute stats/etc
    return s_sub
|
def get_code(results):
    """Determine the exit status code to be returned from a script by
    inspecting the results returned from validating file(s).

    Status codes are binary OR'd together, so exit codes can communicate
    multiple error conditions.
    """
    status = EXIT_SUCCESS
    for file_result in results:
        # Schema errors on any contained object mark the file invalid.
        if any(obj.errors for obj in file_result.object_results):
            status |= EXIT_SCHEMA_INVALID
        # A fatal condition is reported independently of schema validity.
        if file_result.fatal:
            status |= EXIT_VALIDATION_ERROR
    return status
|
def read_json(fn):
    """Convenience method to read pyquil.operator_estimation objects from a
    JSON file. See :py:func:`to_json`."""
    with open(fn) as fh:
        contents = fh.read()
    return json.loads(contents, object_hook=_operator_object_hook)
|
def update_extent(self, operation):
    """Update the extent of the *module* and its elements using the
    specified operation, which has already been executed."""
    # Operations represent continuous lines of code; the last operation
    # to be executed is still the current one.
    start_line, start_col = operation.start
    module = operation.context.module
    module.update_elements(start_line, start_col, operation.length, operation.docdelta)
|
def computeFeedForwardActivity(self, feedForwardInput):
    """Activate trnCells according to the l6Input. These in turn will impact
    bursting mode in relay cells that are connected to these trnCells.

    Given the feedForwardInput, compute which cells will be silent, tonic,
    or bursting.

    :param feedForwardInput:
        a numpy matrix of shape relayCellShape containing 0's and 1's
    :return:
        feedForwardInput is modified to contain 0, 1, or 2. A "2" indicates
        bursting cells.
    """
    # Work on a copy so writes below don't feed back into the scan of
    # feedForwardInput.
    ff = feedForwardInput.copy()
    # For each relay cell, see if any of its FF inputs are active.
    for x in range(self.relayWidth):
        for y in range(self.relayHeight):
            inputCells = self._preSynapticFFCells(x, y)
            for idx in inputCells:
                if feedForwardInput[idx] != 0:
                    ff[x, y] = 1.0
                    # NOTE(review): 'continue' only advances the inner loop;
                    # a 'break' looks intended (result is identical, but the
                    # remaining inputs are scanned needlessly) -- confirm.
                    continue
    # If yes, and it is in burst mode, this cell bursts
    # If yes, and it is not in burst mode, then we just get tonic input.
    # ff += self.burstReadyCells * ff
    # Tonic drive is scaled by 0.4; burst-ready cells add their full drive on
    # top. NOTE(review): this yields values like 0.4/1.4 rather than the
    # docstring's 0/1/2 coding -- confirm which is authoritative.
    ff2 = ff * 0.4 + self.burstReadyCells * ff
    return ff2
|
def _send_file ( self , method , path , data , filename ) :
"""Make a multipart / form - encoded request .
Args :
` method ` : The method of the request ( POST or PUT ) .
` path ` : The path to the resource .
` data ` : The JSON - encoded data .
` filename ` : The filename of the file to send .
Returns :
The content of the response .
Raises :
An exception depending on the HTTP status code of the response ."""
|
with open ( filename , 'r' ) as f :
return self . _make_request ( method , path , data = data , files = [ f , ] )
|
def jsend_output(fail_exception_classes=None):
    """Format task result to json output in jsend specification format. See:
    https://github.com/omniti-labs. Task return value must be dict or None.

    @param fail_exception_classes: exceptions which will produce 'fail'
        response status.
    """
    fail_classes = fail_exception_classes if fail_exception_classes else ()

    def decorator_fn(f):
        @wraps(f)
        @json_output
        def jsend_output_decorator(*args, **kwargs):
            try:
                result = f(*args, **kwargs)
            except fail_classes as e:
                # Expected failures become a 'fail' response with the message.
                return {'status': 'fail', 'data': {'message': str(e)}}
            except Exception as e:
                # Anything unexpected is logged with its traceback and hidden
                # from the client behind a generic error.
                logging.error(str(e) + "\n" + traceback.format_exc())
                return {'status': 'error', 'message': 'Server error.'}
            if result is not None and not isinstance(result, dict):
                msg = ('jsend_output decorator error: task must return dict '
                       'or None.\nTask return value: {0}.')
                logging.error(msg.format(result))
                return {'status': 'error', 'message': 'Server error.'}
            return {'status': 'success', 'data': result}
        return jsend_output_decorator
    return decorator_fn
|
def subscribe(self, topics=(), pattern=None, listener=None):
    """Subscribe to a list of topics, or a topic regex pattern.

    Partitions will be dynamically assigned via a group coordinator.
    Topic subscriptions are not incremental: this list will replace the
    current assignment (if there is one).

    This method is incompatible with assign_from_user()

    Arguments:
        topics (list): List of topics for subscription.
        pattern (str): Pattern to match available topics. You must provide
            either topics or pattern, but not both.
        listener (ConsumerRebalanceListener): Optionally include listener
            callback, which will be called before and after each rebalance
            operation.

            As part of group management, the consumer will keep track of the
            list of consumers that belong to a particular group and will
            trigger a rebalance operation if one of the following events
            trigger:

            * Number of partitions change for any of the subscribed topics
            * Topic is created or deleted
            * An existing member of the consumer group dies
            * A new member is added to the consumer group

            When any of these events are triggered, the provided listener
            will be invoked first to indicate that the consumer's assignment
            has been revoked, and then again when the new assignment has
            been received. Note that this listener will immediately override
            any listener set in a previous call to subscribe. It is
            guaranteed, however, that the partitions revoked/assigned
            through this interface are from topics subscribed in this call.
    """
    # Manual partition assignment and mixing topics with a pattern are
    # both incompatible with group subscription.
    if self._user_assignment or (topics and pattern):
        raise IllegalStateError(self._SUBSCRIPTION_EXCEPTION_MESSAGE)
    assert topics or pattern, 'Must provide topics or pattern'
    if pattern:
        log.info('Subscribing to pattern: /%s/', pattern)
        # Pattern subscription starts with an empty topic set; matching
        # topics are added as cluster metadata is discovered.
        self.subscription = set()
        self.subscribed_pattern = re.compile(pattern)
    else:
        self.change_subscription(topics)
    # Validate the listener type up front so failures surface at subscribe
    # time, not during a rebalance.
    if listener and not isinstance(listener, ConsumerRebalanceListener):
        raise TypeError('listener must be a ConsumerRebalanceListener')
    self.listener = listener
|
def _handle_configspec(self, configspec):
    """Parse the configspec and store it on ``self.configspec``."""
    # FIXME: Should we check that the configspec was created with the
    # correct settings? (i.e. ``list_values=False``)
    if isinstance(configspec, ConfigObj):
        self.configspec = configspec
        return
    try:
        parsed = ConfigObj(
            configspec, raise_errors=True, file_error=True, _inspec=True)
    except ConfigObjError as e:
        # FIXME: Should these errors have a reference to the already
        # parsed ConfigObj?
        raise ConfigspecError('Parsing configspec failed: %s' % e)
    except IOError as e:
        raise IOError('Reading configspec failed: %s' % e)
    self.configspec = parsed
|
def main():
    """Runs a datagenerator from the command-line. Calls JVM start/stop
    automatically. Use -h to see all options."""
    parser = argparse.ArgumentParser(
        description='Executes a data generator from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath",
                        help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap",
                        help="max heap size for jvm, e.g., 512m")
    parser.add_argument("datagenerator",
                        help="data generator classname, e.g., weka.datagenerators.classifiers.classification.LED24")
    parser.add_argument("option", nargs=argparse.REMAINDER,
                        help="additional data generator options")
    parsed = parser.parse_args()
    jars = parsed.classpath.split(os.pathsep) if parsed.classpath is not None else []
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        generator = DataGenerator(classname=parsed.datagenerator)
        if parsed.option:
            generator.options = parsed.option
        DataGenerator.make_data(generator, parsed.option)
    except Exception as e:
        print(e)
    finally:
        # Always shut the JVM down, even after a generator failure.
        jvm.stop()
|
def _finish_connection_action(self, action):
    """Finish a connection attempt.

    Args:
        action (ConnectionAction): the action object describing what we are
            connecting to and what the result of the operation was
    """
    success = action.data['success']
    conn_key = action.data['id']
    # Only a connection currently in the Connecting state may be finished.
    if self._get_connection_state(conn_key) != self.Connecting:
        print("Invalid finish_connection action on a connection whose state is not Connecting, conn_key=%s" % str(conn_key))
        return
    # Cannot be None since we checked above to make sure it exists
    data = self._get_connection(conn_key)
    callback = data['callback']
    conn_id = data['conn_id']
    int_id = data['int_id']
    if success is False:
        reason = action.data['reason']
        if reason is None:
            reason = "No reason was given"
        # Failure: drop the bookkeeping entries in both id spaces before
        # notifying the caller.
        del self._connections[conn_id]
        del self._int_connections[int_id]
        callback(conn_id, self.id, False, reason)
    else:
        # Success: connection becomes Idle; the one-shot callback is cleared
        # from the record before being invoked.
        data['state'] = self.Idle
        data['microstate'] = None
        data['callback'] = None
        callback(conn_id, self.id, True, None)
|
def get(self, key, default=None, cast=None, fresh=False, dotted_lookup=True):
    """Get a value from settings store, this is the preferred way to access::

        >>> from dynaconf import settings
        >>> settings.get('KEY')

    :param key: The name of the setting value, will always be upper case
    :param default: In case of not found it will be returned
    :param cast: Should cast in to @int, @float, @bool or @json ?
    :param fresh: Should reload from loaders store before access?
    :param dotted_lookup: Should perform dotted-path lookup?
    :return: The value if found, default or None
    """
    # Keys are normalized to upper case throughout the store.
    key = key.upper()
    # 'A.B.C' style keys are delegated to the dotted-path resolver.
    if "." in key and dotted_lookup:
        return self._dotted_get(dotted_key=key, default=default, cast=cast, fresh=fresh)
    # Keys explicitly unset earlier must not resurface from the store.
    if key in self._deleted:
        return default
    # Re-run the loaders when freshness is requested (per-call, instance-wide,
    # or via FRESH_VARS_FOR_DYNACONF) -- but never for core default settings.
    if (fresh or self._fresh or key in getattr(self, "FRESH_VARS_FOR_DYNACONF", ())) and key not in dir(default_settings):
        self.unset(key)
        self.execute_loaders(key=key)
    # Prefer the memoized view of the store when one exists.
    store = self._memoized or self.store
    data = store.get(key, default)
    # Casts like '@int' are resolved through the converters registry.
    if cast:
        data = converters.get(cast)(data)
    return data
|
def demo_err():
    """This demo shows how the error in the estimate varies depending
    on how many data points are included in the interpolation
    (m parameter in this function)."""
    max_order = 7
    n = 20
    l = 0.25
    # Column layouts: header row and data rows of the printed table.
    fmt1 = '{0: <5s}\t{1: <21s}\t{2: >21s}\t{3: >21s}\t{4: >21s}'
    fmt2 = '{0: <5d}\t{1:20.18f}\t{2: >21.18f}\t{3: >21.18f}\t{4: >21.18f}'
    # Random, strictly increasing abscissae, mirrored about 0 so the
    # evaluation point x=0 sits in the middle of the grid.
    x = np.cumsum(np.random.rand(n) * l)
    x = np.concatenate((x[::-1] * -1, x))
    lst = []
    derivs = np.zeros(n)
    for order in range(max_order + 1):
        print('Order', order)
        # Use the 2*m points closest to 0; at least 1 + order//2 per side.
        for m in range(1 + order // 2, n + 1):
            sub_x = x[n - m:n + m]
            derivs[m - 1] = derivatives_at_point_by_finite_diff(
                sub_x, np.exp(sub_x), 0, order)[order]
        print(fmt1.format('m', 'val', 'diff', 'analytical error', 'diff/analytical'))
        for m in range(1, n):
            # Successive differences gauge convergence; the analytical error
            # uses d^k/dx^k exp(x) == 1 at x=0 for every order k.
            print(fmt2.format((m + 1) * 2, derivs[m], derivs[m] - derivs[m - 1],
                              derivs[m] - 1, (derivs[m] - derivs[m - 1]) / (derivs[m] - 1)))
        lst.append((derivs[-1], abs(derivs[-1] - derivs[-2])))
    print(np.array(lst))
|
def process_request(self, request):
    """Process a Django request and authenticate users.

    If a JWT authentication header is detected and it is determined to be
    valid, the user is set as ``request.user`` and CSRF protection is
    disabled (``request._dont_enforce_csrf_checks = True``) on the request.

    :param request: Django Request instance
    """
    if 'HTTP_AUTHORIZATION' not in request.META:
        return
    parts = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
    if len(parts) != 2:
        # Header has no "<method> <claim>" shape; ignore it.
        return
    method, claim = parts
    if method.upper() != AUTH_METHOD:
        return
    username = token.get_claimed_username(claim)
    if not username:
        return
    User = get_user_model()
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return
    # Try the claim against each of the user's public keys until one verifies.
    claim_data = None
    for public in user.public_keys.all():
        claim_data = token.verify(claim, public.key, validate_nonce=self.validate_nonce)
        if claim_data:
            break
    if not claim_data:
        return
    logger.debug('Successfully authenticated %s using JWT', user.username)
    request._dont_enforce_csrf_checks = True
    request.user = user
|
def username_password_authn(environ, start_response, reference, key, redirect_uri):
    """Display the login form."""
    logger.info("The login page")
    response = Response(
        mako_template="login.mako", template_lookup=LOOKUP, headers=[])
    argv = {
        "action": "/verify",
        "login": "",
        "password": "",
        "key": key,
        "authn_reference": reference,
        "redirect_uri": redirect_uri,
    }
    logger.info("do_authentication argv: %s", argv)
    return response(environ, start_response, **argv)
|
def get_crumb_list_by_selector(self, crumb_selector):
    """Return the text of every crumb node matched by ``crumb_selector``."""
    nodes = self.parsedpage.get_nodes_by_selector(crumb_selector)
    return [self.parsedpage.get_text_from_node(node) for node in nodes]
|
def dump(config):
    """Returns the entire content of the config object in a way that can
    be easily examined, compared or dumped to a string or file.

    :param config: The configuration object to dump
    :rtype: dict
    """
    config_cls = config.__class__

    def _to_dict(node):
        # Leaves (non-section values) are returned untouched.
        if not isinstance(node, config_cls):
            return node
        out = dict()
        for name, subsection in node._subsections.items():
            out[name] = _to_dict(subsection)
        for name in node:
            out[name] = getattr(node, name)
        return out

    return _to_dict(config)
|
def upload_video(self, media_id, title, description):
    """Obtain a video ``media_id`` for mass-sending video messages.

    See http://mp.weixin.qq.com/wiki/15/5380a4e6f02f2ffdc7981a8ed7a40753.html

    :param media_id: media id obtained via the basic media upload endpoint
        (:func:`upload`)
    :param title: video title
    :param description: video description
    :return: decoded JSON response
    """
    payload = {
        'media_id': media_id,
        'title': title,
        'description': description,
    }
    return self._post(url='media/uploadvideo', data=payload)
|
def get_object(self, group, key=None):
    """Return the first object matching ``group`` (and optional ``key``),
    or None when no match exists."""
    queryset = self.get_queryset_by_group_and_key(group=group, key=key)
    return queryset.first()
|
def rand_ssn():
    """Random SSN (9 digits), formatted as three groups.

    Example::

        >>> rand_ssn()
        295-50-0178
    """
    return "-".join(rand_str(width, string.digits) for width in (3, 2, 4))
|
def get_all_lexers():
    """Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all known lexers."""
    # Built-in lexers: the stored metadata already matches the tuple layout
    # (index 0 holds the module name, which callers don't need).
    for item in itervalues(LEXERS):
        yield item[1:]
    # Plugin lexers expose the same fields as attributes.
    for lexer in find_plugin_lexers():
        yield (lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes)
|
def send_hostinfo(config):
    """Register this host on OpenSubmit test machine."""
    info = all_host_infos()
    logger.debug("Sending host information: " + str(info))
    post_data = [
        ("Config", json.dumps(info)),
        ("Action", "get_config"),
        ("UUID", config.get("Server", "uuid")),
        ("Address", ipaddress()),
        ("Secret", config.get("Server", "secret")),
    ]
    send_post(config, "/machines/", post_data)
|
def get_redirect_url(self, **kwargs):
    """Return the authorization/authentication URL signed with the request
    token."""
    request_token = self.get_request_token()
    query = urllib.urlencode({'oauth_token': request_token.key})
    return '%s?%s' % (self.auth_url, query)
|
def quoteattrs(data):
    """Takes dict of attributes and returns their HTML representation."""
    return ' '.join(
        '{}={}'.format(key, quoteattr(value)) for key, value in data.items())
|
def was_modified_since(header=None, mtime=0, size=0):
    """Check if an item was modified since the user last downloaded it.

    :param header: the value of the ``If-Modified-Since`` header.
        If this is ``None``, simply return ``True``
    :param mtime: the modification time of the item in question.
    :param size: the size of the item.
    """
    header_mtime = modified_since(header, size)
    # Unmodified only when a valid header time exists and is >= our mtime.
    return not (header_mtime and header_mtime <= mtime)
|
def on_selection_changed(self):
    """Callback invoked once the selection has changed."""
    declaration = self.declaration
    items = self.scene.selectedItems()
    # Guard bit 0x01 flags that a selection sync is in progress.
    self._guards |= 0x01
    try:
        declaration.selected_items = [
            item.ref().declaration for item in items if item.ref()
        ]
    finally:
        self._guards &= ~0x01
|
def _link_or_update_vars(self):
    """Creates or updates the symlink to group_vars and returns None.

    :returns: None
    """
    for name, relative_source in self.links.items():
        target = os.path.join(self.inventory_directory, name)
        source = os.path.join(self._config.scenario.directory, relative_source)
        if not os.path.exists(source):
            # Missing source is fatal for inventory setup.
            msg = "The source path '{}' does not exist.".format(source)
            util.sysexit_with_message(msg)
        msg = "Inventory {} linked to {}".format(source, target)
        LOG.info(msg)
        os.symlink(source, target)
|
def norm(self, x):
    """Return the norm of ``x``.

    Parameters
    x : `LinearSpaceElement`
        Element whose norm to compute.

    Returns
    norm : float
        Norm of ``x``.
    """
    if x in self:
        return float(self._norm(x))
    raise LinearSpaceTypeError(
        '`x` {!r} is not an element of {!r}'.format(x, self))
|
def has_permission(self):
    """Permission checking for "normal" Django.

    Collects the objects the permission applies to (via
    ``get_perms_objects``, ``get_object`` or the view queryset), then either
    filters the queryset (GET with ``permission_filter_queryset``) or checks
    the required permissions against the collected objects.
    """
    # [None] acts as a sentinel meaning "no specific objects collected yet".
    objs = [None]
    if hasattr(self, 'get_perms_objects'):
        objs = self.get_perms_objects()
    else:
        if hasattr(self, 'get_object'):
            try:
                objs = [self.get_object()]
            except Http404:
                raise
            except:
                # NOTE(review): bare except silently ignores any other
                # get_object() failure and falls back to the queryset below;
                # presumably intentional, but it also swallows unexpected
                # errors (even KeyboardInterrupt) -- confirm.
                pass
        if objs == [None]:
            objs = self.get_queryset()
    if (hasattr(self, 'permission_filter_queryset') and self.permission_filter_queryset is not False and self.request.method == 'GET'):
        # Filtering mode: narrow the queryset in place and always allow.
        if objs != [None]:
            self.perms_filter_queryset(objs)
        return True
    else:
        return check_perms(self.request.user, self.get_permission_required(), objs, self.request.method)
|
def _ParseRecord(self, parser_mediator, record_index, evtx_record, recovered=False):
    """Extract data from a Windows XML EventLog (EVTX) record.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        record_index (int): event record index.
        evtx_record (pyevtx.record): event record.
        recovered (Optional[bool]): True if the record was recovered.
    """
    event_data = self._GetEventData(
        parser_mediator, record_index, evtx_record, recovered=recovered)
    try:
        written_time = evtx_record.get_written_time_as_integer()
    except OverflowError as exception:
        # A corrupt timestamp is reported as a warning; parsing of the
        # record itself continues.
        parser_mediator.ProduceExtractionWarning((
            'unable to read written time from event record: {0:d} '
            'with error: {1!s}').format(record_index, exception))
        written_time = None
    if not written_time:
        # Timestamp of 0 or unreadable: mark the event time as "Not set".
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
    else:
        date_time = dfdatetime_filetime.Filetime(timestamp=written_time)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_WRITTEN)
    parser_mediator.ProduceEventWithEventData(event, event_data)
|
def _convert_nest_to_flat ( self , params , _result = None , _prefix = None ) :
"""Convert a data structure that looks like : :
{ " foo " : { " bar " : " baz " , " shimmy " : " sham " } }
to : :
{ " foo . bar " : " baz " ,
" foo . shimmy " : " sham " }
This is the inverse of L { _ convert _ flat _ to _ nest } ."""
|
if _result is None :
_result = { }
for k , v in params . iteritems ( ) :
if _prefix is None :
path = k
else :
path = _prefix + '.' + k
if isinstance ( v , dict ) :
self . _convert_nest_to_flat ( v , _result = _result , _prefix = path )
else :
_result [ path ] = v
return _result
|
def main ( ) :
    '''main program loop'''
    # Load bot configuration and establish the agent/pod sessions.
    config = symphony.Config('example-bot.cfg')
    agent, pod, symphony_sid = config.connect()
    # Sanity-check the agent endpoint before sending anything.
    agent.test_echo('test')
    # Compose a simple MessageML payload and post it to the stream.
    message_format = 'MESSAGEML'
    message = '<messageML> hello world. </messageML>'
    agent.send_message(symphony_sid, message_format, message)
|
def split_sections ( s ) :
    """Split a string or iterable thereof into (section, content) pairs.

    Each ``section`` is a stripped version of the section header
    ("[section]") and each ``content`` is a list of stripped lines
    excluding blank lines and comment-only lines.  If there are any such
    lines before the first section header, they're returned in a first
    ``section`` of ``None``.
    """
    current_section = None
    current_content = []
    for line in yield_lines(s):
        if not line.startswith("["):
            current_content.append(line)
            continue
        if not line.endswith("]"):
            raise ValueError("Invalid section heading", line)
        # Starting a new section: emit the previous segment first, unless
        # it is the leading ``(None, [])`` placeholder with no content.
        if current_section or current_content:
            yield current_section, current_content
        current_section = line[1:-1].strip()
        current_content = []
    # wrap up last segment
    yield current_section, current_content
|
def cli ( ctx , ** kwargs ) :
    """DragonPy is a Open source (GPL v3 or later) emulator
    for the 30 years old homecomputer Dragon 32
    and Tandy TRS-80 Color Computer (CoCo)...

    Homepage: https://github.com/jedie/DragonPy
    """
    # Log the raw CLI arguments for debugging start-up problems.
    log . critical ( "cli kwargs: %s" , repr ( kwargs ) )
    # Stash the parsed configuration on the click context so that
    # subcommands can reach it via ``ctx.obj``.
    ctx . obj = CliConfig ( ** kwargs )
|
def build ( self , builder ) :
    """Build XML by appending to builder."""
    attributes = dict(LocationOID=self.oid)
    # Let mixins contribute state and additional attributes.
    self.mixin()
    self.mixin_params(attributes)
    # SiteRef carries no children, so open and close it immediately.
    builder.start("SiteRef", attributes)
    builder.end("SiteRef")
|
def binomial_coefficient ( n , k ) :
    """Calculate the binomial coefficient indexed by n and k.

    Args:
        n (int): positive integer
        k (int): positive integer

    Returns:
        The binomial coefficient indexed by n and k

    Raises:
        TypeError: If either n or k is not an integer
        ValueError: If either n or k is negative, or if k is strictly
            greater than n
    """
    if not isinstance(k, int) or not isinstance(n, int):
        raise TypeError("Expecting positive integers")
    # Reject negative values before comparing k and n, so inputs such as
    # (n=-1, k=0) report the negativity problem rather than "k > n".
    if k < 0 or n < 0:
        raise ValueError("Expecting positive integers")
    if k > n:
        raise ValueError("k must be lower or equal than n")
    return factorial(n) // (factorial(k) * factorial(n - k))
|
def get_cluster_name ( self ) :
    """Name identifying this RabbitMQ cluster."""
    endpoint = self.url + '/api/cluster-name'
    return self._get(url=endpoint, headers=self.headers, auth=self.auth)
|
def find_abbreviations ( self , kwargs ) :
    """Find the abbreviations for the given function and kwargs.
    Return (name, abbrev, default) tuples.
    """
    try:
        sig = self.signature()
    except (ValueError, TypeError):
        # can't inspect, no info from function; only use kwargs
        return [(key, value, value) for key, value in kwargs.items()]
    result = []
    for param in sig.parameters.values():
        for name, value, default in _yield_abbreviations_for_parameter(param, kwargs):
            if value is empty:
                raise ValueError('cannot find widget or abbreviation for argument: {!r}'.format(name))
            result.append((name, value, default))
    return result
|
def _validate_logical ( self , rule , field , value ) :
    """{ ' allowed ' : ( ' allof ' , ' anyof ' , ' noneof ' , ' oneof ' ) }"""
    # NOTE: the docstring above is machine-read (Cerberus uses it to
    # constrain which rule names this validator handles) -- do not edit it.
    # ``value`` must be a sequence of constraint schemas, one per branch
    # of the logical operator.
    if not isinstance ( value , Sequence ) :
        self . _error ( field , errors . BAD_TYPE )
        return
    # Child validator that checks each branch against the full set of
    # validation rules; unknown rules are rejected.
    validator = self . _get_child_validator ( document_crumb = rule , allow_unknown = False , schema = self . target_validator . validation_rules )
    for constraints in value :
        # Cache key combines the constraints with the current types
        # mapping so that already-validated schemas are skipped.
        _hash = ( mapping_hash ( { 'turing' : constraints } ) , mapping_hash ( self . target_validator . types_mapping ) )
        if _hash in self . target_validator . _valid_schemas :
            continue
        validator ( constraints , normalize = False )
        if validator . _errors :
            self . _error ( validator . _errors )
        else :
            # Remember valid schemas to avoid re-validating them later.
            self . target_validator . _valid_schemas . add ( _hash )
|
def _set_from_whole_string ( rop , s , base , rnd ) :
    """Helper function for set_str2: accept a string, set rop, and return the
    appropriate ternary value.  Raise ValueError if ``s`` doesn't represent
    a valid string in the given base.
    """
    stripped = s.strip()
    ternary, consumed = mpfr.mpfr_strtofr(rop, stripped, base, rnd)
    # mpfr_strtofr reports how many characters it consumed; any leftover
    # characters mean the string was not entirely numeric.
    if consumed != len(stripped):
        raise ValueError("not a valid numeric string")
    return ternary
|
def on_add_rows ( self , event ) :
    """add rows to grid"""
    row_count = self.rows_spin_ctrl.GetValue()
    for _ in range(row_count):
        self.grid.add_row()
    # Resize the frame so the newly added rows are visible.
    self.main_sizer.Fit(self)
|
def _recurse ( coreml_tree , scikit_tree , tree_id , node_id , scaling = 1.0 , mode = 'regressor' , n_classes = 2 , tree_index = 0 ) :
    """Traverse through the tree and append to the tree spec .

    Depth-first walk over a scikit-learn decision tree, mirroring each
    branch/leaf node into the CoreML tree spec ``coreml_tree``.
    ``scaling``, ``mode``, ``n_classes`` and ``tree_index`` are passed
    through to the leaf-value conversion.
    """
    if not ( HAS_SKLEARN ) :
        raise RuntimeError ( 'scikit-learn not found. scikit-learn conversion API is disabled.' )
    # # Recursion should not be called on the leaf node .
    if node_id == _tree . TREE_LEAF :
        raise ValueError ( "Invalid node_id %s" % _tree . TREE_LEAF )
    # A node with a real left child is an internal (branch) node;
    # otherwise it is a leaf.
    if scikit_tree . children_left [ node_id ] != _tree . TREE_LEAF :
        # scikit-learn splits are "go left if feature <= threshold".
        branch_mode = 'BranchOnValueLessThanEqual'
        feature_index = scikit_tree . feature [ node_id ]
        feature_value = scikit_tree . threshold [ node_id ]
        left_child_id = scikit_tree . children_left [ node_id ]
        right_child_id = scikit_tree . children_right [ node_id ]
        # Add a branch node
        coreml_tree . add_branch_node ( tree_id , node_id , feature_index , feature_value , branch_mode , left_child_id , right_child_id )
        # Now recurse into both subtrees with unchanged parameters.
        _recurse ( coreml_tree , scikit_tree , tree_id , left_child_id , scaling , mode , n_classes , tree_index )
        _recurse ( coreml_tree , scikit_tree , tree_id , right_child_id , scaling , mode , n_classes , tree_index )
    # Add a leaf node to the tree
    else : # Get the scikit - learn value
        if scikit_tree . n_outputs != 1 :
            raise ValueError ( 'Expected only 1 output in the scikit-learn tree.' )
        # Convert the raw node value (class counts or regression target)
        # into the CoreML leaf value.
        value = _get_value ( scikit_tree . value [ node_id ] , mode , scaling , n_classes , tree_index )
        coreml_tree . add_leaf_node ( tree_id , node_id , value )
|
def create_notification_rule ( self , data , ** kwargs ) :
    """Create a notification rule for this user."""
    payload = {'notification_rule': data, }
    url = '{0}/{1}/notification_rules'.format(self.endpoint, self['id'], )
    response = self.request('POST', endpoint=url, data=payload, query_params=kwargs)
    # Keep the cached copy of this user's rules in sync with the server.
    self._data['notification_rules'].append(response['notification_rule'])
    return response
|
def grok_state ( self , obj ) :
    """Determine the desired state of this
    resource based on data present"""
    if 'state' in obj:
        desired = obj['state'].lower()
        # Only the two canonical states are accepted.
        if desired not in ('absent', 'present'):
            raise aomi_excep.Validation('state must be either "absent" or "present"')
    # Missing state defaults to "present".
    self.present = obj.get('state', 'present').lower() == 'present'
|
def parts ( self ) :
    """Split the path into parts like Pathlib

    >>> expected = ['/', 'path', 'to', 'there']
    >>> assert DotPath('/path/to/there').parts() == expected
    """
    pieces = self.split(os.path.sep)
    # An absolute path yields a leading empty string; represent the
    # filesystem root explicitly instead.
    pieces[0] = pieces[0] or '/'
    return pieces
|
def index_to_coordinates ( self , index ) :
    """Return a set of 2D ``(x, y)`` coordinates from a linear index."""
    self.validate_index(index)
    # The board is laid out row-major with ``self.length`` columns.
    column = int(index % self.length)
    row = int((index - column) / self.length)
    return column, row
|
def main():
    """
    NAME
        plot_map_pts.py

    DESCRIPTION
        plots points on map

    SYNTAX
        plot_map_pts.py [command line options]

    OPTIONS
        -h prints help and quits
        -sym [ro, bs, g^, r., b-, etc.] [1,5,10] symbol and size for points
            colors are r=red, b=blue, g=green, etc.
            symbols are '.' for points, ^ for triangle, s for square, etc.
            - for lines, -- for dotted lines; see matplotlib plot() docs
        -eye ELAT ELON [specify eyeball location]
        -etp put on topography
        -cmap color map [default is jet]
        -f FILE, specify input file
        -o color ocean blue/land green (default is not)
        -res [c, l, i, h] specify resolution (crude, low, intermediate, high)
        -fmt [pdf, eps, png] specify output format (default is pdf)
        -R don't plot details of rivers
        -B don't plot national/state boundaries, etc.
        -pad [LAT LON] pad bounding box by LAT/LON (default is not)
        -grd SPACE specify grid spacing
        -sav save plot and quit
        -prj PROJ, specify one of the supported projections:
            pc = Plate Carree
            aea = Albers Equal Area
            aeqd = Azimuthal Equidistant
            lcc = Lambert Conformal
            lcyl = Lambert Cylindrical
            merc = Mercator
            mill = Miller Cylindrical
            moll = Mollweide [default]
            ortho = Orthographic
            robin = Robinson
            sinu = Sinusoidal
            stere = Stereographic
            tmerc = Transverse Mercator
            utm = UTM
            laea = Lambert Azimuthal Equal Area
            geos = Geostationary
            npstere = North-Polar Stereographic
            spstere = South-Polar Stereographic

    INPUTS
        space or tab delimited LON LAT data
        OR:
        standard MagIC formatted er_sites or pmag_results table

    DEFAULTS
        res: c
        prj: mollweide; lcc for MagIC format files
        ELAT, ELON = 0, 0
        pad LAT, LON = 0, 0
        NB: high resolution or lines can be very slow
    """
    # ---- defaults ----
    dir_path = '.'
    plot = 0
    ocean = 0
    res = 'c'
    proj = 'moll'
    Lats, Lons = [], []
    fmt = 'pdf'
    sym = 'ro'
    symsize = 5
    fancy = 0
    rivers, boundaries, ocean = 1, 1, 0
    latmin, latmax, lonmin, lonmax, lat_0, lon_0 = -90, 90, 0., 360., 0., 0.
    padlat, padlon, gridspace = 0, 0, 30
    # Empty strings mean "derive the eyeball location from the data".
    lat_0, lon_0 = "", ""
    basemap = 1
    prn_name, prn_loc, names, locs = 0, 0, [], []
    # ---- parse command line ----
    if '-WD' in sys.argv:
        ind = sys.argv.index('-WD')
        dir_path = sys.argv[ind + 1]
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-fmt' in sys.argv:
        ind = sys.argv.index('-fmt')
        fmt = sys.argv[ind + 1]
    if '-res' in sys.argv:
        ind = sys.argv.index('-res')
        res = sys.argv[ind + 1]
    if res != 'c' and res != 'l':
        print('this resolution will take a while - be patient')
    if '-etp' in sys.argv:
        fancy = 1
        print('-W- plotting will require patience!')
    if '-ctp' in sys.argv:
        basemap = 0
    if '-sav' in sys.argv:
        plot = 1
    if '-R' in sys.argv:
        rivers = 0
    if '-B' in sys.argv:
        boundaries = 0
    if '-o' in sys.argv:
        ocean = 1
    if '-cmap' in sys.argv:
        ind = sys.argv.index('-cmap')
        # BUG FIX: colormap names are strings ("jet", "viridis", ...);
        # the previous float() conversion crashed on every valid value.
        cmap = sys.argv[ind + 1]
    else:
        cmap = 'jet'
    if '-grd' in sys.argv:
        ind = sys.argv.index('-grd')
        gridspace = float(sys.argv[ind + 1])
    if '-eye' in sys.argv:
        ind = sys.argv.index('-eye')
        lat_0 = float(sys.argv[ind + 1])
        lon_0 = float(sys.argv[ind + 2])
    if '-sym' in sys.argv:
        ind = sys.argv.index('-sym')
        sym = sys.argv[ind + 1]
        symsize = int(sys.argv[ind + 2])
    if '-pad' in sys.argv:
        ind = sys.argv.index('-pad')
        padlat = float(sys.argv[ind + 1])
        padlon = float(sys.argv[ind + 2])
    # ---- read input data ----
    if '-f' in sys.argv:
        ind = sys.argv.index('-f')
        # renamed from ``file`` to avoid shadowing the builtin
        fname = dir_path + '/' + sys.argv[ind + 1]
        header = open(fname, 'r').readlines()[0].split('\t')
        if 'tab' in header[0]:
            # MagIC formatted file: default to Lambert Conformal.
            proj = 'lcc'
            if 'sites' in header[1]:
                latkey = 'lat'
                lonkey = 'lon'
                namekey = 'site'
                lockey = ''
            else:
                print('file type not supported')
                print(main.__doc__)
                sys.exit()
            Sites, file_type = pmag.magic_read(fname)
            Lats = pmag.get_dictkey(Sites, latkey, 'f')
            Lons = pmag.get_dictkey(Sites, lonkey, 'f')
            if prn_name == 1:
                names = pmag.get_dictkey(Sites, namekey, '')
            if prn_loc == 1:
                # NOTE(review): assigns ``names``, not ``locs`` -- looks
                # like a latent bug, but this branch is unreachable
                # (prn_loc is always 0), so behavior is unchanged.
                names = pmag.get_dictkey(Sites, lockey, '')
        else:
            # plain space/tab delimited LON LAT columns
            ptdata = numpy.loadtxt(fname)
            Lons = ptdata.transpose()[0]
            Lats = ptdata.transpose()[1]
        latmin = numpy.min(Lats) - padlat
        lonmin = numpy.min(Lons) - padlon
        latmax = numpy.max(Lats) + padlat
        lonmax = numpy.max(Lons) + padlon
        if lon_0 == "":
            # No explicit eyeball location: center on the data.
            lon_0 = 0.5 * (lonmin + lonmax)
            lat_0 = 0.5 * (latmin + latmax)
    else:
        print("input file must be specified")
        sys.exit()
    if '-prj' in sys.argv:
        ind = sys.argv.index('-prj')
        proj = sys.argv[ind + 1]
    # ---- build plot options and draw ----
    FIG = {'map': 1}
    pmagplotlib.plot_init(FIG['map'], 6, 6)
    cnt = 0
    Opts = {'latmin': latmin, 'latmax': latmax, 'lonmin': lonmin,
            'lonmax': lonmax, 'lat_0': lat_0, 'lon_0': lon_0,
            'proj': proj, 'sym': sym, 'symsize': 3, 'pltgrid': 1,
            'res': res, 'boundinglat': 0., 'padlon': padlon,
            'padlat': padlat, 'gridspace': gridspace, 'cmap': cmap}
    Opts['details'] = {}
    Opts['details']['coasts'] = 1
    Opts['details']['rivers'] = rivers
    Opts['details']['states'] = boundaries
    Opts['details']['countries'] = boundaries
    Opts['details']['ocean'] = ocean
    Opts['details']['fancy'] = fancy
    if len(names) > 0:
        Opts['names'] = names
    if len(locs) > 0:
        Opts['loc_name'] = locs
    if proj == 'merc':
        # Mercator blows up near the poles; clamp the bounding box.
        Opts['latmin'] = -70
        Opts['latmax'] = 70
        Opts['lonmin'] = -180
        Opts['lonmax'] = 180
    print('please wait to draw points')
    Opts['sym'] = sym
    Opts['symsize'] = symsize
    # Both the basemap and non-basemap code paths called the identical
    # routine, so the redundant if/else on ``basemap`` was collapsed.
    pmagplotlib.plot_map(FIG['map'], Lats, Lons, Opts)
    # ---- save or display ----
    files = {}
    titles = {}
    titles['map'] = 'PT Map'
    for key in list(FIG.keys()):
        files[key] = 'map_pts' + '.' + fmt
    if pmagplotlib.isServer:
        black = '#000000'
        purple = '#800080'
        FIG = pmagplotlib.add_borders(FIG, titles, black, purple)
        pmagplotlib.save_plots(FIG, files)
    if plot == 1:
        pmagplotlib.save_plots(FIG, files)
    else:
        pmagplotlib.draw_figs(FIG)
        ans = input(" S[a]ve to save plot, Return to quit: ")
        if ans == "a":
            pmagplotlib.save_plots(FIG, files)
|
def create_run_config ( model_name , master = "" , model_dir = None , iterations_per_loop = 1000 , num_shards = 8 , log_device_placement = False , save_checkpoints_steps = 1000 , save_checkpoints_secs = None , keep_checkpoint_max = 20 , keep_checkpoint_every_n_hours = 10000 , num_gpus = 1 , gpu_order = "" , num_async_replicas = 1 , enable_graph_rewriter = False , gpu_mem_fraction = 0.95 , no_data_parallelism = False , optionally_use_dist_strat = False , daisy_chain_variables = True , schedule = "continuous_train_and_eval" , worker_job = "/job:localhost" , worker_id = 0 , ps_replicas = 0 , ps_job = "/job:ps" , ps_gpu = 0 , random_seed = None , sync = False , tpu_infeed_sleep_secs = None , use_tpu = False , use_tpu_estimator = False , xla_jit_level = tf . OptimizerOptions . OFF , inter_op_parallelism_threads = 0 , log_step_count_steps = 100 , intra_op_parallelism_threads = 0 , tpu_config_extra_kwargs = None , cloud_tpu_name = "" ) :
    """Create RunConfig, TPUConfig, and Parallelism object.

    Picks the RunConfig class (tf.contrib.learn, tf.contrib.tpu, or
    tf.estimator) based on the TPU / distributed flags, then attaches
    either a MirroredStrategy or a data_parallelism object to the
    resulting config.  Returns the constructed config.
    """
    session_config = create_session_config ( log_device_placement = log_device_placement , enable_graph_rewriter = enable_graph_rewriter , gpu_mem_fraction = gpu_mem_fraction , use_tpu = use_tpu , xla_jit_level = xla_jit_level , inter_op_parallelism_threads = inter_op_parallelism_threads , intra_op_parallelism_threads = intra_op_parallelism_threads )
    run_config_args = { "master" : master , "evaluation_master" : master , "model_dir" : model_dir , "session_config" : session_config , "save_summary_steps" : 100 , "save_checkpoints_steps" : save_checkpoints_steps , "save_checkpoints_secs" : save_checkpoints_secs , "keep_checkpoint_max" : keep_checkpoint_max , "keep_checkpoint_every_n_hours" : keep_checkpoint_every_n_hours , "tf_random_seed" : random_seed , "log_step_count_steps" : log_step_count_steps }
    # steps-based and secs-based checkpointing are mutually exclusive
    if save_checkpoints_secs :
        del run_config_args [ "save_checkpoints_steps" ]
    run_config_cls = tf . contrib . learn . RunConfig
    if use_tpu or use_tpu_estimator : # If using TPUEstimator , use TPU RunConfig , add TPUConfig , and add
        # additional args .
        tpu_config_kwargs = { "iterations_per_loop" : iterations_per_loop , "num_shards" : num_shards , "per_host_input_for_training" : True , "initial_infeed_sleep_secs" : tpu_infeed_sleep_secs , }
        if tpu_config_extra_kwargs is not None :
            tpu_config_kwargs . update ( tpu_config_extra_kwargs )
        run_config_cls = tf . contrib . tpu . RunConfig
        tpu_config = tf . contrib . tpu . TPUConfig ( ** tpu_config_kwargs )
        run_config_args [ "tpu_config" ] = tpu_config
        if not master and "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" in os . environ : # If running on TPU but no master is set and the KUBE env var is present
            # then we ' re running on ML Engine . Set the master .
            run_config_args [ "master" ] = os . environ [ "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" ]
            run_config_args [ "evaluation_master" ] = run_config_args [ "master" ]
        elif not master and cloud_tpu_name : # Update run _ config to use cluster instead of master / evaluation _ master
            # as we need the cluster spec to use Cloud Pods
            tpu_cluster_resolver = tf . contrib . cluster_resolver . TPUClusterResolver ( cloud_tpu_name )
            run_config_args [ "cluster" ] = tpu_cluster_resolver
            del run_config_args [ "master" ]
            del run_config_args [ "evaluation_master" ]
    elif is_cloud_async_distributed ( ) :
        # Async distributed training on Cloud uses the core estimator
        # RunConfig, which derives masters from the cluster spec.
        run_config_cls = tf . estimator . RunConfig
        del run_config_args [ "master" ]
        del run_config_args [ "evaluation_master" ]
    config = run_config_cls ( ** run_config_args )
    # If not using TPU , add device info for data _ parallelism
    config . use_tpu = use_tpu
    if not use_tpu :
        config . t2t_device_info = { "num_async_replicas" : num_async_replicas , }
        # MirroredStrategy is only valid for single-worker, symmetric
        # models with no parameter servers.
        use_distribution_strategy = ( optionally_use_dist_strat and t2t_model . T2TModel . has_symmetric_shards ( model_name ) and not no_data_parallelism and ps_replicas == 0 and ps_gpu == 0 and num_async_replicas == 1 )
        if use_distribution_strategy :
            tf . logging . info ( "Configuring MirroredStrategy DistributionStrategy to replicate the " "model." )
            distribution = tf . contrib . distribute . MirroredStrategy ( )
            config = config . replace ( train_distribute = distribution )
            config . data_parallelism = None
        else :
            tf . logging . info ( "Configuring DataParallelism to replicate the model." )
            config . data_parallelism = devices . data_parallelism ( daisy_chain_variables = daisy_chain_variables , ps_replicas = ps_replicas , ps_job = ps_job , ps_gpu = ps_gpu , schedule = schedule , sync = sync , worker_gpu = num_gpus , worker_replicas = num_async_replicas , worker_id = worker_id , gpu_order = gpu_order , worker_job = worker_job , no_data_parallelism = no_data_parallelism )
    return config
|
def _get_groups ( self , data ) :
    """Return the list of all groups defined in ``data``.

    Files with no explicit group (key ``None``) are collected under the
    default names 'Sources' (for source keys) and 'Includes' (for
    include files).

    :param data: mapping with one dict per key in ``SOURCE_KEYS`` plus
        'include_files', each keyed by group name.
    :return: list of unique group names, in first-seen order.
    """
    groups = []
    for attribute in SOURCE_KEYS:
        # Iterate keys only -- the file lists (values) are not needed.
        for group in data[attribute]:
            if group is None:
                # ungrouped sources get a default group name
                group = 'Sources'
            if group not in groups:
                groups.append(group)
    for group in data['include_files']:
        if group is None:
            group = 'Includes'
        if group not in groups:
            groups.append(group)
    return groups
|
def perform ( self ) :
    """Perform the version upgrade on the database.

    Skips versions that are already installed unless this exact version
    is forced via ``config.force_version``.  On failure, the accumulated
    logs plus the traceback are recorded in the versions table before
    the exception is re-raised.
    """
    db_versions = self . table . versions ( )
    version = self . version
    # Skip if already applied, unless this exact version is forced.
    if ( version . is_processed ( db_versions ) and not self . config . force_version == self . version . number ) :
        self . log ( u'version {} is already installed' . format ( version . number ) )
        return
    self . start ( )
    try :
        self . _perform_version ( version )
    except Exception :
        # On old interpreters (the < (3, 4) check) the formatted
        # traceback may be a byte string that must be decoded before
        # joining with the unicode log lines.
        if sys . version_info < ( 3 , 4 ) :
            msg = traceback . format_exc ( ) . decode ( 'utf8' , errors = 'ignore' )
        else :
            msg = traceback . format_exc ( )
        # Persist the logs and traceback, then propagate the failure.
        error = u'\n' . join ( self . logs + [ u'\n' , msg ] )
        self . table . record_log ( version . number , error )
        raise
    self . finish ( )
|
def verify_honeypot_value ( request , field_name ) :
    """Verify that request.POST[field_name] is a valid honeypot.

    Ensures that the field exists and passes verification according to
    HONEYPOT_VERIFIER.  Returns an ``HttpResponseBadRequest`` when the
    check fails, otherwise ``None``.
    """
    verifier = getattr(settings, 'HONEYPOT_VERIFIER', honeypot_equals)
    if request.method != 'POST':
        return None
    field = field_name or settings.HONEYPOT_FIELD_NAME
    # Missing field or a value the verifier rejects both fail the check.
    if field not in request.POST or not verifier(request.POST[field]):
        resp = render_to_string('honeypot/honeypot_error.html', {'fieldname': field})
        return HttpResponseBadRequest(resp)
|
def submit ( self , __fun , * args , ** kwargs ) :
    """Creates a new future and enqueues it. Returns the future."""
    # Bind the callable and its arguments, then hand it to the queue.
    task = Future().bind(__fun, *args, **kwargs)
    self.enqueue(task)
    return task
|
def get_first_field_values_as_list ( self , field ) :
    """Return the value of ``field`` from the first document containing it.

    Goes through all documents returned looking for the specified field;
    at the first encounter the field's value is returned.

    :param str field: The name of the field for lookup.
    :raises SolrResponseError: if no document in the result set contains
        the field.
    """
    for doc in self.docs:
        # membership test on the dict directly -- no need for .keys()
        if field in doc:
            return doc[field]
    raise SolrResponseError("No field in result set")
|
def apply_magnitude_interpolation ( self , mag , iml_table ) :
    """Interpolates the tables to the required magnitude level

    :param float mag:
        Magnitude
    :param iml_table:
        Intensity measure level table
    """
    # do not allow "mag" to exceed maximum table magnitude
    if mag > self.m_w[-1]:
        mag = self.m_w[-1]
    # Reject magnitudes below the supported range (the upper bound can
    # no longer be exceeded after the clamp above).
    if mag < self.m_w[0] or mag > self.m_w[-1]:
        raise ValueError("Magnitude %.2f outside of supported range "
                         "(%.2f to %.2f)" % (mag, self.m_w[0], self.m_w[-1]))
    # log10 of the spectral acceleration is assumed to scale
    # (approximately) linearly with magnitude.
    m_interpolator = interp1d(self.m_w, numpy.log10(iml_table), axis=1)
    return 10.0 ** m_interpolator(mag)
|
def register_connection ( name , hosts = None , consistency = None , lazy_connect = False , retry_connect = False , cluster_options = None , default = False , session = None ) :
    """Add a connection to the connection registry.

    ``hosts`` and ``session`` are mutually exclusive, and
    ``consistency``, ``lazy_connect``, ``retry_connect``, and
    ``cluster_options`` only work with ``hosts``.  Using ``hosts`` will
    create a new :class:`cassandra.cluster.Cluster` and
    :class:`cassandra.cluster.Session`.

    :param list hosts: list of hosts (``contact_points`` for
        :class:`cassandra.cluster.Cluster`).
    :param int consistency: The default :class:`~.ConsistencyLevel` for
        the registered connection's new session. Default is the same as
        :attr:`.Session.default_consistency_level`. For use with
        ``hosts`` only; will fail when used with ``session``.
    :param bool lazy_connect: True if should not connect until first
        use. For use with ``hosts`` only; will fail when used with
        ``session``.
    :param bool retry_connect: True if we should retry to connect even
        if there was a connection failure initially. For use with
        ``hosts`` only; will fail when used with ``session``.
    :param dict cluster_options: A dict of options to be used as keyword
        arguments to :class:`cassandra.cluster.Cluster`. For use with
        ``hosts`` only; will fail when used with ``session``.
    :param bool default: If True, set the new connection as the
        cqlengine default.
    :param Session session: A :class:`cassandra.cluster.Session` to be
        used in the created connection.
    """
    # Re-registering under an existing name replaces it silently; warn.
    if name in _connections :
        log . warning ( "Registering connection '{0}' when it already exists." . format ( name ) )
    if session is not None :
        # ``session`` is mutually exclusive with every argument used to
        # build a new cluster, so reject any combination of the two.
        invalid_config_args = ( hosts is not None or consistency is not None or lazy_connect is not False or retry_connect is not False or cluster_options is not None )
        if invalid_config_args :
            raise CQLEngineException ( "Session configuration arguments and 'session' argument are mutually exclusive" )
        conn = Connection . from_session ( name , session = session )
        conn . setup_session ( )
    else : # use hosts argument
        if consistency is None :
            consistency = ConsistencyLevel . LOCAL_ONE
        conn = Connection ( name , hosts = hosts , consistency = consistency , lazy_connect = lazy_connect , retry_connect = retry_connect , cluster_options = cluster_options )
        conn . setup ( )
    # Register and optionally promote to the default connection.
    _connections [ name ] = conn
    if default :
        set_default_connection ( name )
    return conn
|
async def remove ( self , * , node_id : str , force : bool = False ) -> Mapping [ str , Any ] :
    """Remove a node from a swarm.

    Args:
        node_id: The ID or name of the node
        force: passed through as the ``force`` query parameter
    """
    params = {"force": force}
    endpoint = "nodes/{node_id}".format(node_id=node_id)
    data = await self.docker._query_json(endpoint, method="DELETE", params=params)
    return data
|
def add_substitution ( self , substitution ) :
    """Add a substitution to the email.

    When the substitution carries a ``personalization`` index, it is
    attached to the personalization at that index (created on demand);
    otherwise it is applied to every existing personalization.

    :param substitution: a Substitution (or list of Substitution) to add
    :type substitution: Substitution or list
    """
    if substitution . personalization :
        # A specific personalization index was requested; IndexError
        # means no personalization exists at that index yet.
        try :
            personalization = self . _personalizations [ substitution . personalization ]
            has_internal_personalization = True
        except IndexError :
            personalization = Personalization ( )
            has_internal_personalization = False
        personalization . add_substitution ( substitution )
        if not has_internal_personalization :
            # Insert the freshly created personalization at the
            # requested index.
            self . add_personalization ( personalization , index = substitution . personalization )
    else :
        if isinstance ( substitution , list ) :
            # Apply every substitution to every personalization.
            for s in substitution :
                for p in self . personalizations :
                    p . add_substitution ( s )
        else :
            for p in self . personalizations :
                p . add_substitution ( substitution )
|
def printCols ( strlist , cols = 5 , width = 80 ) :
    """Print elements of list in cols columns"""
    # Items are laid out column-major: the first ``nlines`` items fill
    # column 0, the next ``nlines`` fill column 1, and so on.
    nlines = (len(strlist) + cols - 1) // cols
    lines = nlines * [""]
    for idx, item in enumerate(strlist):
        col, row = divmod(idx, nlines)
        # Pad out to this column's start position; fall back to a single
        # space when the previous column overflowed its slot width.
        pad = col * width // cols - len(lines[row])
        if pad > 0:
            lines[row] = lines[row] + pad * " " + item
        else:
            lines[row] = lines[row] + " " + item
    for text in lines:
        print(text)
|
def get_lyrics_letssingit ( song_name ) :
    '''Scrapes the lyrics of a song since spotify does not provide lyrics
    takes song title as arguement

    Returns the lyrics text, or an empty string when the song cannot be
    found, the page layout is unexpected, or the request fails.
    '''
    lyrics = ""
    url = "http://search.letssingit.com/cgi-exe/am.cgi?a=search&artist_id=&l=archive&s=" + quote(song_name.encode('utf-8'))
    html = urlopen(url).read()
    soup = BeautifulSoup(html, "html.parser")
    # First search hit with the "high_profile" class is the best match.
    link = soup.find('a', {'class': 'high_profile'})
    try:
        link = link.get('href')
        link = urlopen(link).read()
        soup = BeautifulSoup(link, "html.parser")
        try:
            lyrics = soup.find('div', {'id': 'lyrics'}).text
            # strip the leading junk characters before the lyrics proper
            lyrics = lyrics[3:]
        except AttributeError:
            # page found, but it has no lyrics div
            lyrics = ""
    except Exception:
        # Was a bare ``except:``; narrowed so Ctrl-C still interrupts.
        # Any scraping failure (no search hit, bad link, network error)
        # keeps the best-effort empty-string result.
        lyrics = ""
    return lyrics
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.