signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def updating(name, jail=None, chroot=None, root=None, filedate=None, filename=None):
    '''Display UPDATING entries for the named software package.

    name
        Package whose UPDATING entries should be shown.

    jail
        Perform the action in the specified jail.

    chroot
        Perform the action in the specified chroot (ignored if ``jail`` is
        specified).

    root
        Perform the action in the specified root (ignored if ``jail`` is
        specified).

    filedate
        Only entries newer than date are shown. Use a YYYYMMDD date format.

    filename
        Defines an alternative location of the UPDATING file.

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.updating foo
        salt '*' pkg.updating foo jail=<jail name or id>
        salt '*' pkg.updating foo chroot=/path/to/chroot
        salt '*' pkg.updating foo filedate=20130101
        salt '*' pkg.updating foo filename=/tmp/UPDATING
    '''
    # Accumulate single-letter pkg(8) option fragments; they are joined and
    # prefixed with a single '-' below, matching pkg's combined-flag syntax.
    flag_parts = []
    if filedate:
        flag_parts.append('d {0}'.format(filedate))
    if filename:
        flag_parts.append('f {0}'.format(filename))
    opts = ''.join(flag_parts)

    cmd = _pkg(jail, chroot, root)
    cmd.append('updating')
    if opts:
        cmd.append('-' + opts)
    cmd.append(name)
    return __salt__['cmd.run'](cmd, output_loglevel='trace', python_shell=False)
|
def write_file(self, html, outfile):
    """Persist the given HTML string to *outfile*.

    Calls ``err_exit`` (terminating the program) if the file cannot be
    written.
    """
    try:
        # Text-mode write; the whole document is written in one call.
        with open(outfile, 'wt') as handle:
            handle.write(html)
    except (IOError, OSError) as exc:
        # Prefer the OS-level message when available, else the exception.
        err_exit('Error writing %s: %s' % (outfile, exc.strerror or exc))
|
def _execute_request(self, request):
    """Helper method to execute a request, since a lock should be used
    to not fire up multiple requests at the same time.

    :param request: a Google API request object exposing ``execute``.
    :return: Result of `request.execute`
    """
    # Class-level lock: serializes API calls across *all* provider
    # instances, so only one request is in flight at a time.
    with GoogleCloudProvider.__gce_lock:
        return request.execute(http=self._auth_http)
|
def quoted_tweet(self):
    """The quoted Tweet as a Tweet object.

    If the Tweet is not a quote Tweet, return None.
    If the quoted Tweet payload cannot be loaded as a Tweet, this will
    raise a "NotATweetError".

    Returns:
        Tweet: A Tweet representing the quoted status (or None)
        (see tweet_embeds.get_quote_tweet, this is that value as a Tweet)

    Raises:
        NotATweetError: if quoted tweet is malformed
    """
    quote_tweet = tweet_embeds.get_quoted_tweet(self)
    if quote_tweet is None:
        # Not a quote Tweet at all.
        return None
    try:
        return Tweet(quote_tweet)
    except NotATweetError as nate:
        # Re-raise with context about *which* payload failed, chaining the
        # original error so the root cause stays visible in tracebacks.
        raise NotATweetError(
            "The quote-tweet payload appears malformed."
            + " Failed with '{}'".format(nate)
        ) from nate
|
def slurpLines(file, expand=False):
    r"""Read a complete file and return it as a list of lines.

    *file* may be a file handle or a filename (str/unicode); *expand*
    is forwarded to the normalization helper.
    """
    handle = _normalizeToFile(file, "r", expand)
    try:
        return handle.readlines()
    finally:
        # Always close, even if readlines() raises.
        handle.close()
|
def decode_cf_variables(variables, attributes, concat_characters=True,
                        mask_and_scale=True, decode_times=True,
                        decode_coords=True, drop_variables=None,
                        use_cftime=None):
    """Decode several CF encoded variables.

    See: decode_cf_variable

    ``variables`` is a mapping of name -> variable; ``attributes`` holds the
    dataset-level attributes.  Returns ``(new_vars, attributes, coord_names)``.
    """
    # Index variables by the dimensions they use so stackable() can inspect
    # every variable sharing a given dimension.
    dimensions_used_by = defaultdict(list)
    for v in variables.values():
        for d in v.dims:
            dimensions_used_by[d].append(v)

    def stackable(dim):  # figure out if a dimension can be concatenated over
        # A dimension is stackable only if it is not itself a variable and
        # every variable using it is a bytes ('S') array with that dimension
        # in last position.
        if dim in variables:
            return False
        for v in dimensions_used_by[dim]:
            if v.dtype.kind != 'S' or dim != v.dims[-1]:
                return False
        return True

    coord_names = set()

    # Normalize drop_variables: accept None, a single name, or a list.
    if isinstance(drop_variables, str):
        drop_variables = [drop_variables]
    elif drop_variables is None:
        drop_variables = []
    drop_variables = set(drop_variables)

    # Time bounds coordinates might miss the decoding attributes
    if decode_times:
        _update_bounds_attributes(variables)

    new_vars = OrderedDict()
    for k, v in variables.items():
        if k in drop_variables:
            continue
        # Only stack single-character byte arrays along a stackable last dim.
        stack_char_dim = (concat_characters and v.dtype == 'S1' and
                          v.ndim > 0 and stackable(v.dims[-1]))
        new_vars[k] = decode_cf_variable(
            k, v, concat_characters=concat_characters,
            mask_and_scale=mask_and_scale, decode_times=decode_times,
            stack_char_dim=stack_char_dim, use_cftime=use_cftime)
        if decode_coords:
            var_attrs = new_vars[k].attrs
            if 'coordinates' in var_attrs:
                coord_str = var_attrs['coordinates']
                var_coord_names = coord_str.split()
                # Promote 'coordinates' from attrs to encoding only when every
                # referenced coordinate actually exists in the dataset.
                if all(k in variables for k in var_coord_names):
                    new_vars[k].encoding['coordinates'] = coord_str
                    del var_attrs['coordinates']
                    coord_names.update(var_coord_names)

    # A dataset-level 'coordinates' attribute also declares coordinate names;
    # copy attributes before mutating (pop) them.
    if decode_coords and 'coordinates' in attributes:
        attributes = OrderedDict(attributes)
        coord_names.update(attributes.pop('coordinates').split())

    return new_vars, attributes, coord_names
|
async def handle_agent_hello(self, agent_addr, message: AgentHello):
    """Handle an AgentAvailable message. Add agent_addr to the list of available agents.

    Registers the agent, advertises its job slots, and merges its container
    inventory into the backend-wide container registry, then refreshes the
    job queue and notifies connected clients.
    """
    self._logger.info("Agent %s (%s) said hello", agent_addr, message.friendly_name)

    # Delete previous instance of this agent, if any
    if agent_addr in self._registered_agents:
        await self._delete_agent(agent_addr)

    self._registered_agents[agent_addr] = message.friendly_name
    # One entry per job slot, so the agent can be picked that many times.
    self._available_agents.extend([agent_addr for _ in range(0, message.available_job_slots)])
    # NOTE(review): this stores a live dict *view*, not a copy — presumably a
    # snapshot was intended; confirm whether message dicts can mutate later.
    self._containers_on_agent[agent_addr] = message.available_containers.keys()
    self._ping_count[agent_addr] = 0

    # update information about available containers
    # self._containers maps name -> (id, created_timestamp, [agent_addrs])
    for container_name, container_info in message.available_containers.items():
        if container_name in self._containers:
            # check if the id is the same
            if self._containers[container_name][0] == container_info["id"]:
                # ok, just add the agent to the list of agents that have the container
                self._logger.debug("Registering container %s for agent %s", container_name, str(agent_addr))
                self._containers[container_name][2].append(agent_addr)
            elif self._containers[container_name][1] > container_info["created"]:
                # containers stored have been created after the new one
                # add the agent, but emit a warning
                self._logger.warning("Container %s has multiple version: \n"
                                     "\t Currently registered agents have version %s (%i)\n"
                                     "\t New agent %s has version %s (%i)",
                                     container_name,
                                     self._containers[container_name][0],
                                     self._containers[container_name][1],
                                     str(agent_addr),
                                     container_info["id"], container_info["created"])
                self._containers[container_name][2].append(agent_addr)
            else:  # self._containers[container_name][1] < container_info["created"]:
                # containers stored have been created before the new one
                # add the agent, update the infos, and emit a warning
                self._logger.warning("Container %s has multiple version: \n"
                                     "\t Currently registered agents have version %s (%i)\n"
                                     "\t New agent %s has version %s (%i)",
                                     container_name,
                                     self._containers[container_name][0],
                                     self._containers[container_name][1],
                                     str(agent_addr),
                                     container_info["id"], container_info["created"])
                self._containers[container_name] = (container_info["id"], container_info["created"],
                                                    self._containers[container_name][2] + [agent_addr])
        else:  # just add it
            self._logger.debug("Registering container %s for agent %s", container_name, str(agent_addr))
            self._containers[container_name] = (container_info["id"], container_info["created"], [agent_addr])

    # update the queue
    await self.update_queue()
    # update clients
    await self.send_container_update_to_client(self._registered_clients)
|
def delete_ssh_template(auth, url, template_name=None, template_id=None):
    """Takes template_name as input to issue RESTful call to HP IMC which will delete the
    specific ssh template from the IMC system.

    :param auth: requests auth object  # usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface  # usually auth.url from pyhpeimc.auth.authclass
    :param template_name: str value of template name
    :param template_id: str value template template_id value
    :return: int HTTP response code (or an error string if the request fails)
    :rtype: int
    :raises ValueError: if neither template_name nor template_id is supplied
    """
    # BUG FIX: the original referenced an undefined variable (NameError)
    # when both arguments were None; fail explicitly instead.  Raised before
    # the try-block so it is never masked by the requests error handler.
    if template_id is None and template_name is None:
        raise ValueError("Either template_name or template_id must be supplied")
    try:
        if template_id is None:
            # Resolve the template name to its numeric id.
            ssh_templates = get_ssh_template(auth, url)
            for template in ssh_templates:
                if template['name'] == template_name:
                    template_id = template['id']
        f_url = url + "/imcrs/plat/res/ssh/%s/delete" % template_id
        response = requests.delete(f_url, auth=auth, headers=HEADERS)
        return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " delete_ssh_template: An Error has occured"
|
def expose(*methods):
    """A decorator for exposing the methods of a class.

    Parameters
    ----------
    *methods : str
        A str representation of the methods that should be exposed to callbacks.

    Returns
    -------
    decorator : function
        A function accepting one argument - the class whose methods will be
        exposed - and which returns a new :class:`Watchable` that will
        notify a :class:`Spectator` when those methods are called.

    Notes
    -----
    This is essentially a decorator version of :func:`expose_as`.
    """
    def decorator(cls):
        # Delegate to expose_as, naming the Watchable after the class itself.
        return expose_as(cls.__name__, cls, *methods)
    return decorator
|
def check(self, request, user):
    """Check whether the Mastodon service is well configured.

    Registers an app against the user's instance and attempts a login.

    :return: True on success, otherwise the MastodonIllegalArgumentError
        raised by the failed login.
    """
    callback_path = reverse('mastodon_callback')
    redirect_uris = '%s://%s%s' % (request.scheme, request.get_host(), callback_path)
    service = UserService.objects.get(user=user, name='ServiceMastodon')
    client_id, client_secret = MastodonAPI.create_app(
        client_name="TriggerHappy",
        api_base_url=service.host,
        redirect_uris=redirect_uris)
    # get the token by logging in
    api = MastodonAPI(client_id=client_id,
                      client_secret=client_secret,
                      api_base_url=service.host)
    try:
        api.log_in(username=service.username, password=service.password)
    except MastodonIllegalArgumentError as e:
        return e
    return True
|
def re_line_and_indentation(base_indentation, modifiers=(True, True)):
    """Returns a re matching newline + base_indentation.

    modifiers is a tuple, (include_first, include_final).
    If include_first, matches indentation at the beginning of the string.
    If include_final, matches indentation at the end of the string.

    Cached per (modifiers, base_indentation) pair via the function
    attributes ``cache`` and ``tuple`` (populated elsewhere).
    """
    cache = re_line_and_indentation.cache[modifiers]
    compiled = cache.get(base_indentation, None)
    if compiled is None:
        prefix, suffix = re_line_and_indentation.tuple[modifiers]
        # BUG FIX: store under base_indentation — the key used by the
        # cache.get() lookup above.  The old code wrote cache[modifiers],
        # so the lookup never hit and every call recompiled the pattern.
        compiled = cache[base_indentation] = _re.compile(
            prefix + base_indentation + suffix)
    return compiled
|
def __get_segmentation_path(self, path):
    """Create path with "_segmentation" suffix and keep extension.

    :param path: original file path.
    :return: path with "_segmentation" inserted before the extension.
    """
    # Split off the extension so the suffix lands before it.
    base, extension = os.path.splitext(path)
    return "{0}_segmentation{1}".format(base, extension)
|
def artifact_bundles(self):
    """Gets the Artifact Bundles API client, creating it on first access.

    Returns:
        ArtifactBundles:
    """
    client = self.__artifact_bundles
    if not client:
        # Lazily instantiate and memoize the client for subsequent calls.
        client = self.__artifact_bundles = ArtifactBundles(self.__connection)
    return client
|
def _ctxs(self):
    """:rtype: list of SliceViewContext"""
    # Collect the context of every tab currently held by the tab widget.
    contexts = []
    for idx in range(self._tab_widget.count()):
        contexts.append(self._tab_widget.widget(idx).context)
    return contexts
|
def check_garner(text):
    """Suggest the preferred forms.

    source:     Garner's Modern American Usage
    source_url: http://bit.ly/1T4alrY
    """
    err = "redundancy.garner"
    msg = "Redundancy. Use '{}' instead of '{}'."
    # Each entry pairs the preferred form with the redundant phrase(s)
    # (regex patterns) that should be replaced by it.
    redundancies = [
        ["adequate", ["adequate enough"]],
        ["admitted", ["self-admitted"]],
        ["affidavit", ["sworn affidavit"]],
        ["agreement", ["mutual agreement"]],
        ["alumnus", ["former alumnus"]],
        ["antithetical", ["directly antithetical"]],
        ["approximately", ["approximately about"]],
        ["associate", ["associate together(?: in groups)?"]],
        ["bivouac", ["temporary bivouac", "bivouac camp"]],
        ["blend", ["blend together"]],
        ["but", ["but nevertheless"]],
        ["charged with...", ["accused of a charge"]],
        ["circumstances of", ["circumstances surrounding"]],
        ["circumstances", ["surrounding circumstances"]],
        ["close", ["close proximity"]],
        ["collaborate", ["collaborate together"]],
        ["collaborator", ["fellow collaborator"]],
        ["collaborators", ["fellow collaborators"]],
        ["collocated", ["collocated together"]],
        ["colleagues", ["fellow colleagues"]],
        ["combine", ["combine together"]],
        ["complacent", ["self-complacent"]],
        ["confessed", ["self-confessed"]],
        ["connect", ["connect together"]],
        ["consensus", ["(?:general )?consensus of opinion"]],
        ["consolidate", ["consolidate together"]],
        ["continues to", ["still continues to"]],
        ["contradictory", ["mutually contradictory"]],
        ["cooperation", ["mutual cooperation"]],
        ["couple", ["couple together"]],
        ["crisis", ["serious crisis"]],
        ["eliminate", ["entirely eliminate"]],
        ["especially", ["most especially"]],
        ["fact", ["actual fact"]],
        ["facts", ["true facts"]],
        ["forecast", ["future forecast"]],
        ["founding fathers", ["founding forefathers"]],
        ["free", ["free and gratis"]],
        ["free", ["free gratis"]],
        ["full", ["completely full"]],
        ["fundamentals", ["basic fundamentals"]],
        ["gift", ["free gift"]],
        ["innovation", ["new innovation"]],
        ["interact", ["interact with each other"]],
        ["large", ["large-size"]],
        ["meld", ["meld together"]],
        ["merge", ["merge together"]],
        ["mingle", ["mingle together"]],
        ["mix", ["mix together"]],
        ["mutual feelings", ["mutual feelings for eachother"]],
        ["mutual respect", ["mutual respect for each other"]],
        ["native citizen", ["native-born citizen"]],
        ["necessity", ["absolute necessity"]],
        ["obvious", ["blatantly obvious"]],
        ["pause", ["pause for a moment"]],
        ["planning", ["advance planning"]],
        ["plans", ["future plans"]],
        ["pooled", ["pooled together"]],
        ["potable water", ["potable drinking water"]],
        ["potable water", ["potable drinking water"]],
        ["recruit", ["new recruit"]],
        ["reelected", ["reelected for another term"]],
        ["refer", ["refer back"]],
        ["regress", ["regress back"]],
        ["repay them", ["repay them back"]],
        ["repay", ["repay back"]],
        ["repeat", ["repeat again"]],
        ["repeat", ["repeat back"]],
        ["repeat", ["repeat the same"]],
        ["repeated", ["repeated the same"]],
        ["reprieve", ["temporary reprieve"]],
        ["respite", ["brief respite"]],
        ["retirement", ["retiral", "retiracy"]],
        ["retreat", ["retreat back"]],
        ["return", ["return back"]],
        ["scrutinize", ["closely scrutinize"]],
        ["software", ["software program"]],
        ["surrounded", ["surrounded on all sides"]],
        ["the nation", ["the whole entire nation"]],
        ["throughout the", ["throughout the entire"]],
        ["timpani", ["timpani drum"]],
        ["twins", ["pair of twins"]],
        ["vacancy", ["unfilled vacancy"]],
        ["various", ["various different"]],
        ["veteran", ["former veteran"]],
        ["visible", ["visible to the eye"]],
        ["vocation", ["professional vocation"]],
        ["while", ["while at the same time"]],
    ]
    return preferred_forms_check(text, redundancies, err, msg)
|
def load_pyproject_toml(
    use_pep517,      # type: Optional[bool]
    pyproject_toml,  # type: str
    setup_py,        # type: str
    req_name         # type: str
):
    # type: (...) -> Optional[Tuple[List[str], str, List[str]]]
    """Load the pyproject.toml file.

    Parameters:
        use_pep517 - Has the user requested PEP 517 processing? None
            means the user hasn't explicitly specified.
        pyproject_toml - Location of the project's pyproject.toml file
        setup_py - Location of the project's setup.py file
        req_name - The name of the requirement we're processing (for
            error reporting)

    Returns:
        None if we should use the legacy code path, otherwise a tuple
        (
            requirements from pyproject.toml,
            name of PEP 517 backend,
            requirements we should check are installed after setting
                up the build environment
        )
    """
    has_pyproject = os.path.isfile(pyproject_toml)
    has_setup = os.path.isfile(setup_py)

    if has_pyproject:
        with io.open(pyproject_toml, encoding="utf-8") as f:
            pp_toml = pytoml.load(f)
        build_system = pp_toml.get("build-system")
    else:
        build_system = None

    # The following cases must use PEP 517
    # We check for use_pep517 being non-None and falsey because that means
    # the user explicitly requested --no-use-pep517. The value 0 as
    # opposed to False can occur when the value is provided via an
    # environment variable or config file option (due to the quirk of
    # strtobool() returning an integer in pip's configuration code).
    if has_pyproject and not has_setup:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project does not have a setup.py"
            )
        use_pep517 = True
    elif build_system and "build-backend" in build_system:
        if use_pep517 is not None and not use_pep517:
            raise InstallationError(
                "Disabling PEP 517 processing is invalid: "
                "project specifies a build backend of {} "
                "in pyproject.toml".format(build_system["build-backend"])
            )
        use_pep517 = True

    # If we haven't worked out whether to use PEP 517 yet,
    # and the user hasn't explicitly stated a preference,
    # we do so if the project has a pyproject.toml file.
    elif use_pep517 is None:
        use_pep517 = has_pyproject

    # At this point, we know whether we're going to use PEP 517.
    assert use_pep517 is not None

    # If we're using the legacy code path, there is nothing further
    # for us to do here.
    if not use_pep517:
        return None

    if build_system is None:
        # Either the user has a pyproject.toml with no build-system
        # section, or the user has no pyproject.toml, but has opted in
        # explicitly via --use-pep517.
        # In the absence of any explicit backend specification, we
        # assume the setuptools backend that most closely emulates the
        # traditional direct setup.py execution, and require wheel and
        # a version of setuptools that supports that backend.
        build_system = {
            "requires": ["setuptools>=40.8.0", "wheel"],
            "build-backend": "setuptools.build_meta:__legacy__",
        }

    # If we're using PEP 517, we have build system information (either
    # from pyproject.toml, or defaulted by the code above).
    # Note that at this point, we do not know if the user has actually
    # specified a backend, though.
    assert build_system is not None

    # Ensure that the build-system section in pyproject.toml conforms
    # to PEP 518.
    error_template = (
        "{package} has a pyproject.toml file that does not comply "
        "with PEP 518: {reason}"
    )

    # Specifying the build-system table but not the requires key is invalid
    if "requires" not in build_system:
        raise InstallationError(
            error_template.format(package=req_name, reason=(
                "it has a 'build-system' table but not "
                "'build-system.requires' which is mandatory in the table"
            ))
        )

    # Error out if requires is not a list of strings
    requires = build_system["requires"]
    if not _is_list_of_str(requires):
        raise InstallationError(error_template.format(
            package=req_name,
            reason="'build-system.requires' is not a list of strings.",
        ))

    backend = build_system.get("build-backend")
    check = []  # type: List[str]
    if backend is None:
        # If the user didn't specify a backend, we assume they want to use
        # the setuptools backend. But we can't be sure they have included
        # a version of setuptools which supplies the backend, or wheel
        # (which is needed by the backend) in their requirements. So we
        # make a note to check that those requirements are present once
        # we have set up the environment.
        # This is quite a lot of work to check for a very specific case. But
        # the problem is, that case is potentially quite common - projects that
        # adopted PEP 518 early for the ability to specify requirements to
        # execute setup.py, but never considered needing to mention the build
        # tools themselves. The original PEP 518 code had a similar check (but
        # implemented in a different way).
        backend = "setuptools.build_meta:__legacy__"
        check = ["setuptools>=40.8.0", "wheel"]

    return (requires, backend, check)
|
def split(args):
    """%prog split file outdir N

    Split file into N records. This allows splitting FASTA/FASTQ/TXT file
    properly at boundary of records. Split is useful for parallelization
    on input chunks.

    Option --mode is useful on how to break into chunks.
    1. chunk - chunk records sequentially, 1-100 in file 1, 101-200 in file 2, etc.
    2. cycle - chunk records in Round Robin fashion
    3. optimal - try to make split file of roughly similar sizes, using LPT
    algorithm. This is the default.
    """
    p = OptionParser(split.__doc__)
    # NOTE(review): the docstring advertises a "chunk" mode but the accepted
    # choice here is "batch" — confirm which name is the intended one.
    mode_choices = ("batch", "cycle", "optimal")
    p.add_option("--all", default=False, action="store_true",
                 help="split all records [default: %default]")
    p.add_option("--mode", default="optimal", choices=mode_choices,
                 help="Mode when splitting records [default: %default]")
    p.add_option("--format", choices=("fasta", "fastq", "txt", "clust"),
                 help="input file format [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 3:
        sys.exit(not p.print_help())

    filename, outdir, N = args
    fs = FileSplitter(filename, outputdir=outdir, format=opts.format, mode=opts.mode)

    if opts.all:
        # --all: ignore the requested N and emit one chunk per record.
        logging.debug("option -all override N")
        N = fs.num_records
    else:
        # Never ask for more chunks than there are records.
        N = min(fs.num_records, int(N))
        assert N > 0, "N must be > 0"

    logging.debug("split file into %d chunks" % N)
    fs.split(N)

    return fs
|
def showDraw(self, fignum=1):
    """show the element drawing

    :param fignum: define figure number to show element drawing
    """
    if self._patches == []:
        # setDraw() is what populates self._patches; nothing to show yet.
        print("Please setDraw() before showDraw(), then try again.")
        return
    else:
        fig = plt.figure(fignum)
        fig.clear()
        ax = fig.add_subplot(111, aspect='equal')
        [ax.add_patch(i) for i in self._patches]
        # Scale both axes to twice the first patch's bounding box so the
        # whole element stays in view.
        bbox = self._patches[0].get_path().get_extents()
        x0 = 2.0 * min(bbox.xmin, bbox.ymin)
        x1 = 2.0 * max(bbox.xmax, bbox.ymax)
        ax.set_xlim(x0, x1)
        ax.set_ylim(x0, x1)
        # x1, y1 = tuple(self.nextp0)
        # x2, y2 = tuple(self.nextp1)
        # x3, y3 = tuple(self.nextpc)
        # ax.plot([x1, x2, x3], [y1, y2, y3], 'o')  # , ms=5, fc='b', ec='b')
        # Mark the element's exit point with a blue dot.
        x, y = tuple(self.next_p0)
        ax.plot(x, y, 'o', ms=10, c='b')
        # Annotate with the element's name and an arrow, as configured by
        # self._anote (set elsewhere).
        ax.annotate(s=self._anote['name'], xy=self._anote['xypos'],
                    xytext=self._anote['textpos'], textcoords='data',
                    arrowprops=dict(arrowstyle='->'), rotation=-90,
                    fontsize='small')
        fig.canvas.draw()
        plt.grid()
        plt.show()
|
def _unpack_oxm_field(self):
    """Unpack oxm_field from oxm_field_and_mask.

    Returns:
        :class:`OxmOfbMatchField`, int: oxm_field from oxm_field_and_mask.

    Raises:
        ValueError: If oxm_class is OFPXMC_OPENFLOW_BASIC but
            :class:`OxmOfbMatchField` has no such integer value.
    """
    # The low bit of oxm_field_and_mask is the has-mask flag; drop it.
    field_int = self.oxm_field_and_mask >> 1
    if self.oxm_class != OxmClass.OFPXMC_OPENFLOW_BASIC:
        return field_int
    # OFB fields must map onto the OxmOfbMatchField enum subset.
    return OxmOfbMatchField(field_int)
|
def assignrepr(self, prefix, style=None, utcoffset=None):
    """Return a |repr| string with a prefixed assignment.

    The result spans three lines: the first carries *prefix* and the first
    date; the following lines are indented so the dates line up inside the
    ``Timegrid(...)`` call.  *style* and *utcoffset* are forwarded unchanged
    to |Date.to_string|.
    """
    # Continuation lines are indented past the prefix plus "Timegrid(".
    indent = ' ' * (len(prefix) + 9)
    first = self.firstdate.to_string(style, utcoffset)
    last = self.lastdate.to_string(style, utcoffset)
    lines = [
        f"{prefix}Timegrid('{first}',",
        f"{indent}'{last}',",
        f"{indent}'{str(self.stepsize)}')",
    ]
    return '\n'.join(lines)
|
def remove_widget(self):
    """Removes the Component Widget from the engine.

    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Removing '{0}' Component Widget.".format(self.__class__.__name__))
    # Detach this component from the preferences grid layout...
    layout = self.__preferences_manager.findChild(QGridLayout, "Others_Preferences_gridLayout")
    layout.removeWidget(self)
    # ...and orphan the group box so Qt releases it from the UI tree.
    self.TCP_Client_Ui_groupBox.setParent(None)
    return True
|
def get_or_create_time_series(self, label_values):
    """Get a mutable measurement for the given set of label values.

    :type label_values: list(:class:`LabelValue`)
    :param label_values: The measurement's label values.

    :rtype: :class:`GaugePointLong`, :class:`GaugePointDouble`,
        :class:`opencensus.metrics.export.cumulative.CumulativePointLong`,
        or
        :class:`opencensus.metrics.export.cumulative.CumulativePointDouble`
    :return: A mutable point that represents the last value of the
        measurement.
    :raises ValueError: if label_values is None, contains a None element,
        or does not have one value per label key.
    """
    # Descriptive messages added: the original raised bare ValueErrors,
    # which made failures indistinguishable to callers and logs.
    if label_values is None:
        raise ValueError("label_values must not be None")
    if any(lv is None for lv in label_values):
        raise ValueError("label_values must not contain None values")
    if len(label_values) != self._len_label_keys:
        raise ValueError(
            "label_values must have exactly one value per label key "
            "({} expected, got {})".format(
                self._len_label_keys, len(label_values)))
    return self._get_or_create_time_series(label_values)
|
def nvmlDeviceGetMemoryInfo(handle):
    r"""Retrieves the amount of used, free and total memory available on the device, in bytes.

    For all products.

    Enabling ECC reduces the amount of total available memory, due to the extra required parity bits.
    Under WDDM most device memory is allocated and managed on startup by Windows.
    Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory
    allocated by all active channels on the device.

    See \ref nvmlMemory_t for details on available memory info.

    @param device   The identifier of the target device
    @param memory   Reference in which to return the memory information

    @return
        - \ref NVML_SUCCESS                  if \a memory has been populated
        - \ref NVML_ERROR_UNINITIALIZED      if the library has not been successfully initialized
        - \ref NVML_ERROR_INVALID_ARGUMENT   if \a device is invalid or \a memory is NULL
        - \ref NVML_ERROR_GPU_IS_LOST        if the target GPU has fallen off the bus or is otherwise inaccessible
        - \ref NVML_ERROR_UNKNOWN            on any unexpected error

    nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo
    """
    # Allocate the ctypes struct the C API fills in.
    c_memory = c_nvmlMemory_t()
    # Resolve the symbol in the NVML shared library and call it by reference.
    fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo")
    ret = fn(handle, byref(c_memory))
    # Raises NVMLError for any non-success return code.
    _nvmlCheckReturn(ret)
    return bytes_to_str(c_memory)
|
def main(set_to=None, set_patch_count=None, release=None, bump=None, lock=None,
         file_triggers=None, config_path=None, **extra_updates):
    """Main workflow.

    Load config from cli and file,
    detect "bump triggers" - things that cause a version increment,
    find the current version,
    create a new version,
    write out new version and any other requested variables.

    :param set_to: explicitly set semver to this version string
    :param set_patch_count: sets the patch number to the commit count
    :param release: marks with a production flag
        just sets a single flag as per config
    :param bump: string indicating major/minor/patch
        more significant bumps will zero the less significant ones
    :param lock: locks the version string for the next call to autoversion
        lock only removed if a version bump would have occurred
    :param file_triggers: whether to enable bumping based on file triggers
        bumping occurs once if any file(s) exist that match the config
    :param config_path: path to config file
    :param extra_updates: extra key/value overrides written to the targets
    :return: (current_semver, new_semver, native_updates)
    """
    updates = {}
    if config_path:
        get_or_create_config(config_path, config)
    # Compile the user-supplied regex strings once, up front.
    for k, v in config.regexers.items():
        config.regexers[k] = re.compile(v)
    # a forward-mapping of the configured aliases
    # giving <our config param>: <the configured value>
    # if a value occurs multiple times, we take the last set value
    for k, v in config.key_aliases.items():
        config._forward_aliases[v] = k
    all_data = read_targets(config.targets)
    current_semver = semver.get_current_semver(all_data)
    triggers = get_all_triggers(bump, file_triggers)
    updates.update(get_lock_behaviour(triggers, all_data, lock))
    updates.update(get_dvcs_info())
    if set_to:
        # Explicit version wins over any computed bump.
        _LOG.debug("setting version directly: %s", set_to)
        new_semver = auto_version.definitions.SemVer(*set_to.split("."))
        if not lock:
            warnings.warn(
                "After setting version manually, does it need locking for a CI flow?",
                UserWarning,
            )
    elif set_patch_count:
        _LOG.debug(
            "auto-incrementing version, using commit count for patch: %s",
            updates[Constants.COMMIT_COUNT_FIELD],
        )
        new_semver = semver.make_new_semver(
            current_semver, triggers, patch=updates[Constants.COMMIT_COUNT_FIELD])
    else:
        _LOG.debug("auto-incrementing version")
        new_semver = semver.make_new_semver(current_semver, triggers)
    updates.update(get_final_version_string(
        release_mode=release,
        semver=new_semver,
        commit_count=updates.get(Constants.COMMIT_COUNT_FIELD, 0),
    ))
    # Expose each significant figure (major/minor/patch) individually.
    for part in semver.SemVerSigFig:
        updates[part] = getattr(new_semver, part)
    # only rewrite a field that the user has specified in the configuration
    native_updates = {
        native: updates[key]
        for native, key in config.key_aliases.items()
        if key in updates
    }
    # finally, add in commandline overrides
    native_updates.update(extra_updates)
    write_targets(config.targets, **native_updates)
    return current_semver, new_semver, native_updates
|
def to_compressed(self):
    """Compress an IP address to its shortest possible compressed form.

    IPv4 addresses drop an all-zero second or third octet (e.g.
    ``127.0.0.1`` becomes ``127.1``); IPv6 addresses strip leading zeros
    from each group and collapse the longest run of zero groups to ``::``.

    >>> print(IP('127.0.0.1').to_compressed())
    127.1
    >>> print(IP('2001:1234:0000:0000:0000:0000:0000:5678').to_compressed())
    2001:1234::5678
    >>> print(IP('fe80:0000:0000:0000:0000:0000:0000:0000').to_compressed())
    fe80::
    """
    if self.v == 4:
        octets = self.dq.split('.')
        try:
            z = octets.index('0')
            if z == 1 and octets.index('0', z + 1):
                # Octets 2 and 3 are both zero: drop them both.
                octets.pop(z)
                octets.pop(z)
                return '.'.join(octets)
            elif z == 2:
                # Only the third octet is zero: drop just that one.
                octets.pop(z)
                return '.'.join(octets)
        except ValueError:
            # No (further) zero octet found; fall through unchanged.
            pass
        return self.dq
    else:
        # Strip leading zeros from every group via a hex round-trip.
        groups = ['%x' % int(part, 16) for part in self.dq.split(':')]
        joined = ':%s:' % ':'.join(groups)
        # Locate the longest run of zero groups (as ':0:0:...:' text).
        best = [0, -1]
        for match in re.finditer(r'(:[:0]+)', joined):
            run = len(match.group(1)) - 1
            if run > best[0]:
                best = [run, match.start(1)]
        run, pos = best
        if run:
            # Collapse the run to '::' and trim the sentinel colons.
            joined = joined[:pos] + ':' + joined[pos + run:]
            joined = re.sub(r'((^:)|(:$))', '', joined)
            joined = re.sub(r'((^:)|(:$))', '::', joined)
        return joined
|
def metadata(self, run_id=None):
    """Provide metadata on a Ding0 run.

    Parameters
    ----------
    run_id : str, (defaults to current date)
        Distinguish multiple versions of Ding0 data by a `run_id`. If not
        set it defaults to current date in the format YYYYMMDDhhmmss.

    Returns
    -------
    dict
        Metadata
    """
    # Get latest version and/or git commit hash.  Narrowed from a bare
    # `except:`: `git` may be missing (OSError) or the call may fail
    # (CalledProcessError); anything else should not be silently swallowed.
    try:
        version = subprocess.check_output(
            ["git", "describe", "--tags", "--always"]).decode('utf8')
    except (OSError, subprocess.CalledProcessError):
        version = None

    # Collect names of database tables used to run Ding0 and data version
    if self.config['input_data_source']['input_data'] == 'versioned':
        data_version = self.config['versioned']['version']
        database_tables = self.config['versioned']
    elif self.config['input_data_source']['input_data'] == 'model_draft':
        data_version = 'model_draft'
        database_tables = self.config['model_draft']
    else:
        data_version = 'unknown'
        database_tables = 'unknown'

    # Collect assumptions from the individual config sections
    assumptions = {}
    assumptions.update(self.config['assumptions'])
    assumptions.update(self.config['mv_connect'])
    assumptions.update(self.config['mv_routing'])
    assumptions.update(self.config['mv_routing_tech_constraints'])

    # Determine run_id if not set
    if not run_id:
        run_id = datetime.now().strftime("%Y%m%d%H%M%S")

    # Set instance attribute run_id only once so repeated calls reuse it
    if not self._run_id:
        self._run_id = run_id

    # Assign data to dict
    metadata = dict(
        version=version,
        mv_grid_districts=[int(_.id_db) for _ in self._mv_grid_districts],
        database_tables=database_tables,
        data_version=data_version,
        assumptions=assumptions,
        run_id=self._run_id,
    )
    return metadata
|
def waliki_box(context, slug, show_edit=True, *args, **kwargs):
    """Render a wiki page's content as an embeddable box in any webpage.

    If the requesting user may edit (or, for a missing page, create) the
    page, an inline edit form and its action URL are included in the
    returned template context.

    It's inspired in `django-boxes`_

    .. _django-boxes: https://github.com/eldarion/django-boxes
    """
    request = context["request"]
    try:
        page = Page.objects.get(slug=slug)
    except Page.DoesNotExist:
        page = None

    # An existing page needs change permission; a missing one needs add.
    if page:
        allowed = check_perms_helper('change_page', request.user, slug)
    else:
        allowed = check_perms_helper('add_page', request.user, slug)

    if allowed:
        form = PageForm(instance=page, initial={'slug': slug})
        form_action = reverse("waliki_edit", args=[slug])
    else:
        form = None
        form_action = None

    return {
        "request": request,
        "slug": slug,
        "label": slug.replace('/', '_'),
        "page": page,
        "form": form,
        "form_action": form_action,
    }
|
def safe_run(coro, return_exceptions=False):
    """Execute a given coroutine and optionally catch exceptions, returning
    them as value. This function is intended to be used internally.

    :param coro: coroutine/generator to drive to completion.
    :param return_exceptions: when ``True``, a raised exception is returned
        as the result instead of propagating.
    :returns: the coroutine's result, or the exception instance when
        ``return_exceptions`` is ``True``.
    """
    try:
        result = yield from coro
    except Exception as err:
        if not return_exceptions:
            # Bare `raise` keeps the original traceback intact; the previous
            # `raise err` re-raised from this frame, resetting it.
            raise
        result = err
    return result
|
def stop(self, timeout=None):
    """Stop the task thread. Synchronous!

    Posts the terminator sentinel to the queue, joins the worker thread
    (waiting at most *timeout* seconds), then clears the thread references.
    """
    with self._lock:
        worker = self._thread
        if not worker:
            return
        self._queue.put_nowait(self._terminator)
        worker.join(timeout=timeout)
        self._thread = None
        self._thread_for_pid = None
|
def get_signatures_from_script(script):
    """Extract signatures from a partially-signed multisig scriptSig.

    :param script: The partially-signed multisig scriptSig.
    :type script: ``bytes``
    :returns: A list of retrieved signature from the provided scriptSig.
    :rtype: A ``list`` of ``bytes`` signatures
    """
    remaining = script[1:]  # skip the leading OP_0
    signatures = []
    while remaining:
        # Each push is a var-int length followed by that many bytes.
        length, remaining = read_var_int(remaining)
        candidate, remaining = read_bytes(remaining, length)
        try:
            # der_to_cdata() raises ValueError unless its argument (without
            # the trailing SIGHASH byte) is a DER-encoded signature.
            der_to_cdata(candidate[:-1])
        except ValueError:
            continue
        signatures.append(candidate)
    return signatures
|
def display_output(arguments):
    '''Display the ASCII art from the image.

    ``arguments`` is the docopt-style option mapping; the image path is read
    from ``arguments['FILE']``, sizing/colour behaviour from the flags.
    '''
    global _ASCII
    if arguments['--alt-chars']:
        _ASCII = _ASCII_2
    try:
        im = Image.open(arguments['FILE'])
    except Exception:
        # Normalize any PIL/file error into one predictable exception
        # (was a bare `except:`, which also caught KeyboardInterrupt).
        raise IOError('Unable to open the file.')
    im = im.convert("RGBA")
    aspect_ratio = float(im.size[0]) / im.size[1]
    scaled_height = _WIDTH / aspect_ratio
    scaled_width = _HEIGHT * aspect_ratio * 2
    if scaled_width > _WIDTH:
        # Fitting the height would overflow the width: clamp to the width.
        width = int(_WIDTH)
        height = int(scaled_height / 2)
    elif scaled_height > _HEIGHT:
        width = int(scaled_width)
        height = int(_HEIGHT)
    else:
        # Image fits both bounds.  Previously width/height were left unbound
        # here, raising UnboundLocalError unless --width/--height was given.
        width = int(scaled_width)
        height = int(_HEIGHT)
    if arguments['--width']:
        width = int(arguments['--width'])
        height = int(width / aspect_ratio / 2)
    elif arguments['--height']:
        height = int(arguments['--height'])
        width = int(height * aspect_ratio * 2)
    if arguments['--width'] and arguments['--height']:
        height = int(arguments['--height'])
        width = int(arguments['--width'])
    im = im.resize((width, height), resample=Image.ANTIALIAS)
    enhancer = ImageEnhance.Contrast(im)
    im = enhancer.enhance(float(arguments['--contrast']))
    img = im.getdata()  # RGBA pixels, used for --colors lookups
    im = im.convert('L')  # grayscale drives the character choice
    if not arguments['--invert']:
        _ASCII = _ASCII[::-1]
    bg = None
    fg = None
    if arguments['--bg'] == 'BLACK':
        bg = rgb(0, 0, 0)
        fg = rgb(5, 5, 5)
    elif arguments['--bg'] == 'WHITE':
        bg = rgb(5, 5, 5)
        fg = rgb(0, 0, 0)
    row_len = 0
    bold = bool(arguments['--bold'])
    for count, i in enumerate(im.getdata()):
        ascii_char = _ASCII[int((i / 255.0) * (len(_ASCII) - 1))]
        try:
            # Raising here is deliberate control flow: without --colors we
            # fall through to the monochrome branch below.
            if not arguments['--colors']:
                raise Exception
            color = rgb(int((img[count][0] / 255.0) * 5),
                        int((img[count][1] / 255.0) * 5),
                        int((img[count][2] / 255.0) * 5))
            print_color(ascii_char, end='', fg=color, bg=bg, bold=bold)
        except Exception:
            if bg and bold:
                print_color(ascii_char, end='', fg=fg, bg=bg, bold=bold)
            elif bold:
                print_color(ascii_char, end='', bold=bold)
            elif bg:
                print_color(ascii_char, end='', fg=fg, bg=bg)
            else:
                print(ascii_char, end='')
        row_len += 1
        if row_len == width:
            row_len = 0
            print('')
|
def threshold(self, n=None):
    """Return the number of complete tasks required to consider the workflow
    complete itself.

    Takes the :py:attr:`law.BaseWorkflow.acceptance` parameter into account:
    values <= 1 are interpreted as a fraction of *n*, larger values as an
    absolute count.  The threshold is passed to the
    :py:class:`law.TargetCollection` (or :py:class:`law.SiblingFileCollection`)
    within :py:meth:`output`.  By default *n* is the length of the branch
    map; for performance purposes you can pass it directly.
    """
    if n is None:
        n = len(self.task.branch_map())
    acceptance = self.task.acceptance
    if acceptance <= 1:
        return acceptance * n
    return acceptance
|
def use(data, attrs):
    """Return the values of the attributes for the given data.

    :param data: the data (a mapping, or any object with attributes)
    :param attrs: strings
    :returns: a list (``None`` for missing dict keys)

    With a dict::

        >>> band = {'name': 'Metallica', 'singer': 'James Hetfield', 'guitarist': 'Kirk Hammet'}
        >>> use(band, ('name', 'date', 'singer'))
        ['Metallica', None, 'James Hetfield']

    With a non dict data structure::

        >>> from collections import namedtuple
        >>> Person = namedtuple('Person', ('name', 'age', 'gender'))
        >>> alice = Person('Alice', 30, 'F')
        >>> use(alice, ('name', 'gender'))
        ['Alice', 'F']
    """
    if isinstance(data, dict):
        if not isiterable(attrs):
            attrs = [attrs]
        coll = map(data.get, attrs)
    else:
        coll = map(lambda name: getattr(data, name), attrs)
    # Materialize: under Python 3 `map` is lazy, but the documented contract
    # (and the doctests above) promise a list.
    return list(coll)
|
def __find_handles(self, model, **spec):
    """Find model instances based on given filter (spec).

    The filter is based on available server-calls, so some values might not
    be available for filtering.  Multiple filter-values is going to do
    multiple server-calls.  For complex filters in small datasets, it might
    be faster to fetch all and do your own in-memory filter.  Empty filter
    will fetch all.

    :param model: subclass of EConomicsModel
    :param spec: mapping of values to filter by
    :return: a list of EConomicsModel instances
    """
    server_calls = []
    # Map filter name -> server-side method suffix, as advertised by the model.
    filter_names = dict([(f['name'], f['method'],) for f in model.get_filters()])
    if not spec:
        # No filters at all: a single GetAll call fetches everything.
        server_calls.append({'method': "%s_GetAll" % model.__name__, 'args': []})
    else:
        # One server call per filter key; results are intersected below.
        for key, value in spec.items():
            if not key in filter_names:
                raise ValueError("no server-method exists for filtering by '%s'" % key)
            args = []
            if not hasattr(value, '__iter__'):
                value = [value]
            if key.endswith('_list'):
                # List-valued filters are passed as a typed SOAP array.
                vtype = type(value[0]).__name__
                # TODO: this surely does not cover all cases of data types
                array = self.soap_factory.create('ArrayOf%s' % vtype.capitalize())
                getattr(array, "%s" % vtype).extend(value)
                args.append(array)
            else:
                args.extend(value)
            method = "%s_%s" % (model.__name__, filter_names[key])
            if filter_names[key].startswith('GetAll'):
                # GetAll-style methods take no arguments.
                args = []
            server_calls.append({'method': method, 'args': args, 'expect': "%sHandle" % model.__name__})
    # One handle list per server call; the final result is their set
    # intersection, i.e. instances matching *all* given filters.
    handles = [map(Handle, self.fetch_list(scall['method'], scall.get('expect'), *scall['args'])) for scall in server_calls]
    return [h.wsdl for h in reduce(set.intersection, map(set, handles))]
|
def getAnalystName(self):
    """Return the full name of the currently assigned analyst.

    Falls back to the raw analyst id when no matching user (or fullname)
    is found, and to an empty string when no analyst is assigned.
    """
    analyst = self.getAnalyst()
    if not analyst:
        return ""
    user = api.get_user(analyst.strip())
    if user:
        fullname = user.getProperty("fullname")
        if fullname:
            return fullname
    return analyst
|
def diff_compute(self, text1, text2, checklines, deadline):
    """Find the differences between two texts.  Assumes that the texts do not
    have any common prefix or suffix.

    Args:
      text1: Old string to be diffed.
      text2: New string to be diffed.
      checklines: Speedup flag.  If false, then don't run a line-level diff
        first to identify the changed areas.
        If true, then run a faster, slightly less optimal diff.
      deadline: Time when the diff should be complete by.

    Returns:
      Array of changes.
    """
    if not text1:
        # Just add some text (speedup).
        return [(self.DIFF_INSERT, text2)]

    if not text2:
        # Just delete some text (speedup).
        return [(self.DIFF_DELETE, text1)]

    if len(text1) > len(text2):
        (longtext, shorttext) = (text1, text2)
    else:
        (shorttext, longtext) = (text1, text2)
    i = longtext.find(shorttext)
    if i != -1:
        # Shorter text is inside the longer text (speedup).
        diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
                 (self.DIFF_INSERT, longtext[i + len(shorttext):])]
        # Swap insertions for deletions if diff is reversed.
        if len(text1) > len(text2):
            diffs[0] = (self.DIFF_DELETE, diffs[0][1])
            diffs[2] = (self.DIFF_DELETE, diffs[2][1])
        return diffs

    if len(shorttext) == 1:
        # Single character string.
        # After the previous speedup, the character can't be an equality.
        return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]

    # Check to see if the problem can be split in two.
    hm = self.diff_halfMatch(text1, text2)
    if hm:
        # A half-match was found, sort out the return data.
        (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
        # Send both pairs off for separate processing.
        diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
        diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
        # Merge the results.
        return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b

    if checklines and len(text1) > 100 and len(text2) > 100:
        # Line-level diff first on large inputs: faster, slightly less optimal.
        return self.diff_lineMode(text1, text2, deadline)

    return self.diff_bisect(text1, text2, deadline)
|
def name(self, name):
    """Sets the name of this FileCreateOrUpdateRequest.

    File name. Should include type extension always when possible. Must not
    include slashes.

    :param name: The name of this FileCreateOrUpdateRequest.
    :type: str
    """
    # Guard clauses; each raises ValueError with the documented message.
    if name is None:
        raise ValueError("Invalid value for `name`, must not be `None`")
    if len(name) > 128:
        raise ValueError("Invalid value for `name`, length must be less than or equal to `128`")
    if len(name) < 1:
        raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`")
    if not re.search('^[^\/]+$', name):
        raise ValueError("Invalid value for `name`, must be a follow pattern or equal to `/^[^\/]+$/`")
    self._name = name
|
def _validate_granttype ( self , path , obj , _ ) :
"""make sure either implicit or authorization _ code is defined"""
|
errs = [ ]
if not obj . implicit and not obj . authorization_code :
errs . append ( 'Either implicit or authorization_code should be defined.' )
return path , obj . __class__ . __name__ , errs
|
def unflat_unique_rowid_map(func, unflat_rowids, **kwargs):
    """performs only one call to the underlying func with unique rowids the func
    must be some lookup function

    TODO: move this to a better place.

    CommandLine:
        python -m utool.util_list --test-unflat_unique_rowid_map:0
        python -m utool.util_list --test-unflat_unique_rowid_map:1

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> kwargs = {}
        >>> unflat_rowids = [[1, 2, 3], [2, 5], [1], []]
        >>> num_calls0 = [0]
        >>> num_input0 = [0]
        >>> def func0(rowids, num_calls0=num_calls0, num_input0=num_input0):
        ...     num_calls0[0] += 1
        ...     num_input0[0] += len(rowids)
        ...     return [rowid + 10 for rowid in rowids]
        >>> func = func0
        >>> unflat_vals = unflat_unique_rowid_map(func, unflat_rowids, **kwargs)
        >>> result = [arr.tolist() for arr in unflat_vals]
        >>> print(result)
        >>> ut.assert_eq(num_calls0[0], 1)
        >>> ut.assert_eq(num_input0[0], 4)
        [[11, 12, 13], [12, 15], [11], []]

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_list import *  # NOQA
        >>> import utool as ut
        >>> import numpy as np
        >>> kwargs = {}
        >>> unflat_rowids = [[1, 2, 3], [2, 5], [1], []]
        >>> num_calls1 = [0]
        >>> num_input1 = [0]
        >>> def func1(rowids, num_calls1=num_calls1, num_input1=num_input1, np=np):
        ...     num_calls1[0] += 1
        ...     num_input1[0] += len(rowids)
        ...     return [np.array([rowid + 10, rowid, 3]) for rowid in rowids]
        >>> func = func1
        >>> unflat_vals = unflat_unique_rowid_map(func, unflat_rowids, **kwargs)
        >>> result = [arr.tolist() for arr in unflat_vals]
        >>> print(result)
        >>> ut.assert_eq(num_calls1[0], 1)
        >>> ut.assert_eq(num_input1[0], 4)
        [[[11, 1, 3], [12, 2, 3], [13, 3, 3]], [[12, 2, 3], [15, 5, 3]], [[11, 1, 3]], []]
    """
    import utool as ut
    # First flatten the list, and remember the original dimensions
    flat_rowids, reverse_list = ut.invertible_flatten2(unflat_rowids)
    # Then make the input unique
    flat_rowids_arr = np.array(flat_rowids)
    unique_flat_rowids, inverse_unique = np.unique(flat_rowids_arr, return_inverse=True)
    # Then perform the lookup / implicit mapping (one call for all rowids)
    unique_flat_vals = func(unique_flat_rowids, **kwargs)
    # Then broadcast unique values back to original flat positions
    flat_vals_ = np.array(unique_flat_vals)[inverse_unique]
    # flat_vals_ = np.array(unique_flat_vals).take(inverse_unique, axis=0)
    output_shape = tuple(list(flat_rowids_arr.shape) + list(flat_vals_.shape[1:]))
    flat_vals = np.array(flat_vals_).reshape(output_shape)
    # Then unflatten the results to the original input dimensions
    unflat_vals = ut.unflatten2(flat_vals, reverse_list)
    return unflat_vals
|
def split(df):
    """Return a (down, up) tuple of casts.

    The down-cast runs up to and including the deepest sample (position of
    the maximum index value); the up-cast is the remainder, reversed so it
    is oriented like a CTD cast.
    """
    pivot = df.index.argmax() + 1
    downcast = df.iloc[:pivot]
    upcast = df.iloc[pivot:][::-1]
    return downcast, upcast
|
def remove(self, name):
    """Remove all attributes with the given *name*.

    Raises :exc:`ValueError` if none were found.
    """
    target = name.strip()  # hoisted: one strip instead of one per attribute
    doomed = [attr for attr in self.attributes if attr.name == target]
    if not doomed:
        raise ValueError(name)
    for attr in doomed:
        self.attributes.remove(attr)
|
def expand_single_values(var, scans):
    """Expand single valued variable to full scan lengths.

    :param var: data array with one value per scan (assumed xarray-like:
        carries ``.attrs``, ``.dims`` and ``.rename`` — TODO confirm)
    :param scans: per-scan repeat counts; if it has a single element, *var*
        is returned unchanged.
    """
    if scans.size == 1:
        return var
    expanded = np.repeat(var, scans)
    expanded.attrs = var.attrs
    # `rename` returns a new object rather than renaming in place; the
    # previous code discarded its result, so the dimension was never
    # actually renamed to 'y'.
    return expanded.rename({expanded.dims[0]: 'y'})
|
def _get_next_time(lines: [dict], target: str) -> str:  # type: ignore
    """Return the next FROM target value or empty."""
    for entry in lines:
        # Skip lines without a value and TEMPO/PROB lines.
        if not entry[target]:
            continue
        if _is_tempo_or_prob(entry['type']):
            continue
        return entry[target]
    return ''
|
def none(self):
    """Return an empty QuerySet bound to the same model and connection."""
    empty = EmptyQuerySet(model=self.model, using=self._using,
                          connection=self._connection)
    return empty
|
def urls(self, key, value):
    """Populate the ``url`` key.

    Also populates the ``ids`` key through side effects: recognized
    LinkedIn/Twitter/Wikipedia URLs are appended to ``ids`` instead, in
    which case nothing is returned for ``url``.
    """
    description = force_single_element(value.get('y'))
    url = value.get('u')
    linkedin_match = LINKEDIN_URL.match(url)
    twitter_match = TWITTER_URL.match(url)
    wikipedia_match = WIKIPEDIA_URL.match(url)
    if linkedin_match:
        self.setdefault('ids', []).append({
            'schema': 'LINKEDIN',
            'value': unquote_url(linkedin_match.group('page')),
        })
    elif twitter_match:
        self.setdefault('ids', []).append({
            'schema': 'TWITTER',
            'value': twitter_match.group('handle'),
        })
    elif wikipedia_match:
        lang = wikipedia_match.group('lang')
        page = unquote_url(wikipedia_match.group('page'))
        # Non-English pages are namespaced with their language code.
        if lang != 'en':
            page = ':'.join([lang, page])
        self.setdefault('ids', []).append({
            'schema': 'WIKIPEDIA',
            'value': page,
        })
    else:
        # Plain URL: return it for the ``url`` key.
        return {
            'description': description,
            'value': url,
        }
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'sentiment' ) and self . sentiment is not None :
_dict [ 'sentiment' ] = self . sentiment
if hasattr ( self , 'emotion' ) and self . emotion is not None :
_dict [ 'emotion' ] = self . emotion
if hasattr ( self , 'limit' ) and self . limit is not None :
_dict [ 'limit' ] = self . limit
return _dict
|
def init_instance(self, key):
    """Create an empty instance if it doesn't exist.

    If the instance already exists, this is a noop.
    """
    with self._lock:
        if key in self._metadata:
            return
        self._metadata[key] = {}
        self._metric_ids[key] = []
|
def _add_filename_details(full_f):
    """Derive variant caller and germline info from a standard CWL filename.

    This is an ugly way of working around not having metadata with calls.
    """
    details = {"vrn_file": full_f}
    basename = os.path.basename(full_f)
    callers = list(genotype.get_variantcallers().keys()) + ["ensemble"]
    for caller in callers:
        if basename.find("-%s.vcf" % caller) > 0:
            details["variantcaller"] = caller
    if basename.find("-germline-") >= 0:
        details["germline"] = full_f
    return details
|
def embed(self, width=600, height=650):
    """Embed a viewer into a Jupyter notebook.

    :param width: iframe width in pixels.
    :param height: iframe height in pixels.
    :returns: an :class:`IPython.display.IFrame` pointing at ``self.url``.
    """
    # Imported lazily so IPython is only required when embedding.
    from IPython.display import IFrame
    return IFrame(self.url, width, height)
|
def topic_present(name, subscriptions=None, attributes=None, region=None, key=None, keyid=None, profile=None):
    '''
    Ensure the SNS topic exists.

    name
        Name of the SNS topic.

    subscriptions
        List of SNS subscriptions.

        Each subscription is a dictionary with a protocol and endpoint key:

        .. code-block:: yaml

            subscriptions:
              - Protocol: https
                Endpoint: https://www.example.com/sns-endpoint
              - Protocol: sqs
                Endpoint: arn:aws:sqs:us-west-2:123456789012:MyQueue

        Additional attributes which may be set on a subscription are:

        - DeliveryPolicy
        - FilterPolicy
        - RawMessageDelivery

        If provided, they should be passed as key/value pairs within the same
        dictionaries.  E.g.

        .. code-block:: yaml

            subscriptions:
              - Protocol: sqs
                Endpoint: arn:aws:sqs:us-west-2:123456789012:MyQueue
                RawMessageDelivery: True

    attributes
        Dictionary of attributes to set on the SNS topic.

        Valid attribute keys are:

        - Policy: the JSON serialization of the topic's access control policy
        - DisplayName: the human-readable name used in the "From" field for
          notifications to email and email-json endpoints
        - DeliveryPolicy: the JSON serialization of the topic's delivery policy

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string)
        that contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    something_changed = False

    current = __salt__['boto3_sns.describe_topic'](name, region, key, keyid, profile)
    if current:
        ret['comment'] = 'AWS SNS topic {0} present.'.format(name)
        TopicArn = current['TopicArn']
    else:
        if __opts__['test']:
            ret['comment'] = 'AWS SNS topic {0} would be created.'.format(name)
            ret['result'] = None
            return ret
        else:
            TopicArn = __salt__['boto3_sns.create_topic'](name, region=region, key=key, keyid=keyid, profile=profile)
            if TopicArn:
                ret['comment'] = 'AWS SNS topic {0} created with ARN {1}.'.format(name, TopicArn)
                something_changed = True
            else:
                ret['comment'] = 'Failed to create AWS SNS topic {0}'.format(name)
                log.error(ret['comment'])
                ret['result'] = False
                return ret
    ### Update any explicitly defined attributes
    want_attrs = attributes if attributes else {}
    # Freshen these in case we just created it above
    curr_attrs = __salt__['boto3_sns.get_topic_attributes'](TopicArn, region=region, key=key, keyid=keyid, profile=profile)
    for attr in ['DisplayName', 'Policy', 'DeliveryPolicy']:
        curr_val = curr_attrs.get(attr)
        want_val = want_attrs.get(attr)
        # Some get default values if not set, so it's not safe to enforce absence if they're
        # not provided at all.  This implies that if you want to clear a value, you must explicitly
        # set it to an empty string.
        if want_val is None:
            continue
        if _json_objs_equal(want_val, curr_val):
            continue
        if __opts__['test']:
            ret['comment'] += ' Attribute {} would be updated on topic {}.'.format(attr, TopicArn)
            ret['result'] = None
            continue
        # AWS expects strings; serialize structured values to JSON.
        want_val = want_val if isinstance(want_val, six.string_types) else salt.utils.json.dumps(want_val)
        if __salt__['boto3_sns.set_topic_attributes'](TopicArn, attr, want_val, region=region, key=key, keyid=keyid, profile=profile):
            ret['comment'] += ' Attribute {0} set to {1} on topic {2}.'.format(attr, want_val, TopicArn)
            something_changed = True
        else:
            ret['comment'] += ' Failed to update {0} on topic {1}.'.format(attr, TopicArn)
            ret['result'] = False
            return ret
    ### Add / remove subscriptions
    mutable_attrs = ('DeliveryPolicy', 'FilterPolicy', 'RawMessageDelivery')
    want_subs = subscriptions if subscriptions else []
    # Keep only the keys that are meaningful for comparison/creation.
    want_subs = [{k: v for k, v in c.items() if k in ('Protocol', 'Endpoint') or k in mutable_attrs} for c in want_subs]
    # NOTE(review): if the topic was just created, `current` is falsy here;
    # assumes describe_topic returned an empty mapping (not None) — confirm.
    curr_subs = current.get('Subscriptions', [])
    subscribe = []
    unsubscribe = []
    want_obfuscated = []
    for sub in want_subs:
        # If the subscription contains inline digest auth, AWS will obfuscate the password
        # with '****'. Thus we need to do the same with ours to permit 1-to-1 comparison.
        # Example: https://user:****@my.endpoiint.com/foo/bar
        endpoint = sub['Endpoint']
        matches = re.search(r'http[s]?://(?P<user>\w+):(?P<pass>\w+)@', endpoint)
        if matches is not None:
            sub['Endpoint'] = endpoint.replace(':' + matches.groupdict()['pass'], ':****')
        want_obfuscated += [{'Protocol': sub['Protocol'], 'Endpoint': sub['Endpoint']}]
        if sub not in curr_subs:
            sub['obfuscated'] = sub['Endpoint']
            sub['Endpoint'] = endpoint
            # Set it back to the unobfuscated value.
            subscribe += [sub]
    for sub in curr_subs:
        # Current subscriptions not wanted any more get unsubscribed, but
        # only once they have a real ARN (pending confirmations don't).
        if {'Protocol': sub['Protocol'], 'Endpoint': sub['Endpoint']} not in want_obfuscated:
            if sub['SubscriptionArn'].startswith('arn:aws:sns:'):
                unsubscribe += [sub['SubscriptionArn']]
    for sub in subscribe:
        ret = _create_or_update_subscription(ret, sub, curr_subs, mutable_attrs, TopicArn, region, key, keyid, profile)
        if ret.pop('something_changed', False) is True:
            something_changed = True
    for sub in unsubscribe:
        if __opts__['test']:
            msg = ' Subscription {} would be removed from topic {}.'.format(sub, TopicArn)
            ret['comment'] += msg
            ret['result'] = None
            continue
        unsubbed = __salt__['boto3_sns.unsubscribe'](sub, region=region, key=key, keyid=keyid, profile=profile)
        if unsubbed:
            ret['comment'] += ' Subscription {0} removed from topic {1}.'.format(sub, TopicArn)
            something_changed = True
        else:
            msg = ' Failed to remove subscription {0} from topic {1}.'.format(sub, TopicArn)
            ret['comment'] += msg
            ret['result'] = False
            return ret
    if something_changed:
        ret['changes']['old'] = current
        ret['changes']['new'] = __salt__['boto3_sns.describe_topic'](name, region, key, keyid, profile)
    return ret
|
def remove_docstrings(tokens):
    """Blank out docstrings in *tokens*, mutating the list in place.

    *tokens* is expected to be a list equivalent of
    ``tokenize.generate_tokens()`` output (entries must be mutable so the
    string slot can be rewritten).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring: remove it, plus the leftover
                # indentation and newline before it.
                tokens[index][1] = ''
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings.  Guard the lookahead:
                # a STRING as the very last token previously raised IndexError.
                if index + 1 < len(tokens) and tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type
|
def _parse_gene_anatomy ( self , fh , limit ) :
"""Process anat _ entity files with columns :
Ensembl gene ID , gene name , anatomical entity ID ,
anatomical entity name , rank score , XRefs to BTO
: param fh : filehandle
: param limit : int , limit per group
: return : None"""
|
dataframe = pd . read_csv ( fh , sep = '\t' )
col = self . files [ 'anat_entity' ] [ 'columns' ]
if list ( dataframe ) != col :
LOG . warning ( '\nExpected headers: %s\nRecived headers: %s' , col , list ( dataframe ) )
gene_groups = dataframe . sort_values ( 'rank score' , ascending = False ) . groupby ( 'Ensembl gene ID' )
if limit is None :
limit = 20
gene_groups = gene_groups . head ( limit ) . groupby ( 'Ensembl gene ID' )
for gene , group in gene_groups :
for index , row in group . iterrows ( ) :
self . _add_gene_anatomy_association ( row [ 'Ensembl gene ID' ] . strip ( ) , row [ 'anatomical entity ID' ] . strip ( ) , row [ 'rank score' ] )
# uberon < = = > bto equivelance ?
return
|
def image(self, render_mode):
    """Return an image generated with a particular render mode.

    Parameters
    ----------
    render_mode : :obj:`RenderMode`
        The type of image we want.

    Returns
    -------
    :obj:`Image`
        The color, depth, or binary image if render_mode is
        COLOR, DEPTH, or SEGMASK respectively; ``None`` otherwise.
    """
    # Table-driven dispatch; getattr keeps the lookup lazy so only the
    # attribute for the requested mode is touched.
    for mode, attr_name in ((RenderMode.COLOR, 'color_im'),
                            (RenderMode.DEPTH, 'depth_im'),
                            (RenderMode.SEGMASK, 'binary_im')):
        if render_mode == mode:
            return getattr(self, attr_name)
    return None
|
def advise(self, name, f, *a, **kw):
    """Add an advice that will be handled later by the handle method.

    Arguments:

    name
        The name of the advice group.
    f
        A callable method or function.

    The rest of the arguments will be passed as arguments and keyword
    arguments to f when it's invoked.
    """
    if name is None:
        return
    advice = (f, a, kw)
    debug = self.get(DEBUG)
    frame = currentframe()
    if frame is None:
        logger.debug('currentframe() failed to return frame')
    else:
        if name in self._called:
            # Group already handled once: guard against advice being
            # registered from within its own handling.
            self.__advice_stack_frame_protection(frame)
        if debug:
            logger.debug("advise '%s' invoked by %s:%d", name, frame.f_back.f_code.co_filename, frame.f_back.f_lineno,)
        if debug > 1:
            # use the memory address of the tuple which should
            # be stable
            self._frames[id(advice)] = ''.join(format_stack(frame.f_back))
    # setdefault-style append into the advice group.
    self._advices[name] = self._advices.get(name, [])
    self._advices[name].append(advice)
|
async def _check_latch_data(self, key, data):
    """This is a private utility method.

    When a data change message is received this method checks to see if
    latching needs to be processed.

    :param key: encoded pin number
    :param data: data change
    :returns: None
    """
    process = False
    # NOTE(review): assumes `key` is always present in latch_map; a missing
    # key makes `latching_entry` None and the subscript below raise — confirm.
    latching_entry = self.latch_map.get(key)
    if latching_entry[Constants.LATCH_STATE] == Constants.LATCH_ARMED:
        # Has the latching criteria been met (per the threshold type)?
        if latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == Constants.LATCH_EQ:
            if data == latching_entry[Constants.LATCH_DATA_TARGET]:
                process = True
        elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == Constants.LATCH_GT:
            if data > latching_entry[Constants.LATCH_DATA_TARGET]:
                process = True
        elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == Constants.LATCH_GTE:
            if data >= latching_entry[Constants.LATCH_DATA_TARGET]:
                process = True
        elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == Constants.LATCH_LT:
            if data < latching_entry[Constants.LATCH_DATA_TARGET]:
                process = True
        elif latching_entry[Constants.LATCHED_THRESHOLD_TYPE] == Constants.LATCH_LTE:
            if data <= latching_entry[Constants.LATCH_DATA_TARGET]:
                process = True
    if process:
        # Criteria met: record the triggering value and hand off.
        latching_entry[Constants.LATCHED_DATA] = data
        await self._process_latching(key, latching_entry)
|
def get_third_party(self, third_party):
    """Return the project-scoped account for the given third-party.

    The third party's underlying account must belong to this bookset
    (enforced via assert).
    """
    underlying = third_party.get_account()
    assert underlying.get_bookset() == self.get_bookset()
    return ProjectAccount(underlying, project=self, third_party=third_party)
|
def reference(self, refobj, taskfileinfo):
    """Reference the given taskfileinfo into the scene and return the created reference node.

    The created reference node will be used on :meth:`RefobjInterface.set_reference` to
    set the reference on a reftrack node.
    Do not call :meth:`RefobjInterface.set_reference` yourself.

    This will also create a group node and group all dagnodes under an appropriate node.

    :param refobj: the reftrack node that will be linked to the reference
    :type refobj: str
    :param taskfileinfo: The taskfileinfo that holds the information for what to reference
    :type taskfileinfo: :class:`jukeboxcore.filesys.TaskFileInfo`
    :returns: the reference node that was created and should be set on the appropriate reftrack node
    :rtype: str
    :raises: None
    """
    # work in root namespace so the new reference namespace is created at top level
    with common.preserve_namespace(":"):
        jbfile = JB_File(taskfileinfo)
        filepath = jbfile.get_fullpath()
        ns_suggestion = reftrack.get_namespace(taskfileinfo)
        newnodes = cmds.file(filepath, reference=True, namespace=ns_suggestion, returnNewNodes=True)
        # You could also use the filename returned by the file command to query the reference node.
        # At the moment there is a bug: if you imported the file before, that command fails.
        # So we get all new reference nodes and pick the one that is not itself
        # referenced (i.e. the top-level reference node we just created).
        for refnode in cmds.ls(newnodes, type='reference'):
            if not cmds.referenceQuery(refnode, isNodeReferenced=True):
                node = refnode
                break
        # NOTE(review): assumes the loop above always finds a match;
        # otherwise ``node`` would be unbound here — confirm.
        # query the actual new namespace (Maya may have altered ns_suggestion)
        ns = cmds.referenceQuery(node, namespace=True)
        # get the content of the new namespace
        content = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True)
        # connect reftrack with scenenode
        scenenode = self.get_scenenode(content)
        self.get_refobjinter().connect_reftrack_scenenode(refobj, scenenode)
        # get the content plus content of child namespaces
        reccontent = cmds.namespaceInfo(ns, listOnlyDependencyNodes=True, dagPath=True, recurse=True)
        # get only the top level dagnodes so we can group them
        dagcontent = cmds.ls(reccontent, ap=True, assemblies=True)
        if not dagcontent:
            # no need for a top group if there are no dagnodes to group
            return node
        # group the dagnodes under a newly created group node
        grpname = reftrack.get_groupname(taskfileinfo)
        reftrack.group_content(dagcontent, ns, grpname, "jb_asset")
        return node
|
def expire(self, key, timeout):
    """Set a timeout on *key*; the key is automatically deleted once the
    timeout expires (a "volatile" key in Redis terminology).

    The timeout is cleared only when the key is removed with
    :meth:`~tredis.RedisClient.delete` or overwritten with
    :meth:`~tredis.RedisClient.set` or :meth:`~tredis.RedisClient.getset`.
    Operations that merely alter the stored value without replacing it
    (e.g. :meth:`~tredis.RedisClient.incr`,
    :meth:`~tredis.RedisClient.lpush`, :meth:`~tredis.RedisClient.hset`)
    leave the timeout untouched. The timeout can be cleared, turning the
    key back into a persistent key, with
    :meth:`~tredis.RedisClient.persist`. If a key is renamed with
    :meth:`~tredis.RedisClient.rename`, the associated time to live is
    transferred to the new key name; a key overwritten by ``rename``
    inherits all characteristics of the renaming key.

    .. note:: **Time complexity**: ``O(1)``

    :param key: The key to set an expiration for
    :type key: :class:`str`, :class:`bytes`
    :param int timeout: The number of seconds to set the timeout to
    :rtype: bool
    :raises: :exc:`~tredis.exceptions.RedisError`
    """
    command = [b'EXPIRE', key, ascii(timeout).encode('ascii')]
    return self._execute(command, 1)
|
def static_fit_result(fit_result, v_residual=None, v_label='Unit-cell volume $(\mathrm{\AA}^3)$', figsize=(5, 5), height_ratios=(3, 1), ms_data=8, p_err=None, v_err=None, pdf_filen=None, title='Fit result'):
    """Plot a static compressional curve fitting result.

    The figure has two stacked panels sharing the pressure axis: the top
    panel shows the data, fit line, and fit uncertainty band; the bottom
    panel shows residuals (pressure residuals by default, or the
    caller-supplied volume residuals).

    :param fit_result: lmfit result object, see example jnb file for detail
    :param v_residual: manual input of volume fit residual; if given, the
        lower panel shows volume residuals instead of pressure residuals
    :param v_label: label for volume axis
    :param figsize: figure size
    :param height_ratios: height ratio between the main and residual plots
    :param ms_data: marker size for data points
    :param p_err: pressure error bar
    :param v_err: volume error bar
    :param pdf_filen: name of pdf output file (no file written when None)
    :param title: title of the figure
    :return: None
    """
    # basic figure setup: main fit panel on top, residual panel below
    f, ax = plt.subplots(2, 1, sharex=True, figsize=figsize, gridspec_kw={'height_ratios': height_ratios})
    for ax_i in ax:
        ax_i.tick_params(direction='in')
    # read data to plot from the lmfit result
    v_data = fit_result.userkws['v']
    p_data = fit_result.data
    p_datafit = fit_result.best_fit
    # wrap fitted parameters with their stderr for uncertainty propagation
    v0 = uct.ufloat(fit_result.params['v0'].value, fit_result.params['v0'].stderr)
    k0 = uct.ufloat(fit_result.params['k0'].value, fit_result.params['k0'].stderr)
    k0p = uct.ufloat(fit_result.params['k0p'].value, fit_result.params['k0p'].stderr)
    # I don't know why but fit_result.residual is not p_data - p_fit
    # p_residual = fit_result.residual
    # setup fit line: evaluate the model from V0 down to the smallest
    # measured volume, propagating parameter uncertainties
    v_fitline = np.linspace(v0.n, min(v_data), 1000)
    p_fit = fit_result.model.func(v_fitline, v0, k0, k0p)
    p_fitline = unp.nominal_values(p_fit)
    # I am unsure about why I have to divide by 3 in order to fit the
    # eval_uncertainty result from lmfit. For now, I do this to be
    # consistent
    del_p = unp.std_devs(p_fit) / 3.
    # top panel: data with error bars, fit line, and uncertainty band
    ax[0].errorbar(p_data, v_data, xerr=p_err, yerr=v_err, fmt='ko', ms=ms_data, mec='w', capsize=0, elinewidth=0.5, label='Data')
    ax[0].plot(p_fitline, v_fitline, 'k-', label='Fit')
    ax[0].fill_betweenx(v_fitline, p_fitline - del_p, p_fitline + del_p, color="k", alpha=0.2)
    if v_residual is None:
        # bottom panel: pressure residuals with the fit uncertainty band
        ax[1].errorbar(p_data, p_data - p_datafit, yerr=p_err, fmt='ko', ms=ms_data, mec='w', capsize=0, elinewidth=0.5)
        ax[1].set_ylabel('$P_{obs} - P_{fit}$')
        ax[1].fill_between(p_fitline, -1. * del_p, del_p, color="k", alpha=0.2)
    else:
        # bottom panel: caller-supplied volume residuals
        ax[1].errorbar(p_data, v_residual, yerr=v_err, fmt='ko', ms=ms_data, mec='w', capsize=0, elinewidth=0.5)
        ax[1].set_ylabel('$V_{obs} - V_{fit}$')
    ax[0].legend()
    ax[1].axhline(0, c='k', ls='--')
    ax[1].set_xlabel('Pressure (GPa)')
    ax[0].set_ylabel(v_label)
    ax[0].set_title(title)
    plt.tight_layout()
    if pdf_filen is not None:
        f.savefig(pdf_filen)
|
def add_phrase(self, phrase: List[int]) -> None:
    """Recursively add *phrase* (a list of word IDs) below this trie node.

    The final ID of the phrase is recorded as a terminal ID; every
    preceding ID creates/descends into a child node.
    """
    head, tail = phrase[0], phrase[1:]
    if not tail:
        # Last word of the phrase: mark it as terminal at this node.
        self.final_ids.add(head)
        return
    if head not in self.children:
        self.children[head] = AvoidTrie()
    self.step(head).add_phrase(tail)
|
def create(model_config, reinforcer, optimizer, storage, total_frames, batches_per_epoch, callbacks=None, scheduler=None, openai_logging=False):
    """Vel factory function: configure OpenAI baselines logging and build
    the RL training command."""
    from vel.openai.baselines import logger
    logger.configure(dir=model_config.openai_dir())
    # Accept scientific-notation strings (e.g. "1e6") for total_frames.
    frame_limit = int(float(total_frames))
    epoch_batches = int(batches_per_epoch)
    return RlTrainCommand(
        model_config=model_config,
        reinforcer=reinforcer,
        optimizer_factory=optimizer,
        scheduler_factory=scheduler,
        storage=storage,
        callbacks=callbacks,
        total_frames=frame_limit,
        batches_per_epoch=epoch_batches,
        openai_logging=openai_logging,
    )
|
def generate_specifications(self, count=1):
    """Return a mapping of AR number -> results-range specification.

    For each of the *count* slots, the source object to copy from is
    looked up; slots with no source map to an empty specification.
    """
    # mapping of UID index to AR objects {1: <AR1>, 2: <AR2>, ...}
    copy_from = self.get_copy_from()
    specs = {}
    for arnum in range(count):
        source = copy_from.get(arnum)
        if source is None:
            specs[arnum] = {}
        else:
            # keyword -> results-range entry for this source
            specs[arnum] = {rr.get("keyword"): rr for rr in source.getResultsRange()}
    return specs
|
def get_order_book(self, code):
    """Get real-time order book (bid/ask depth) data.

    :param code: stock code
    :return: (ret, data). When ret == RET_OK, data is a dict of the form::

            {'code': stock code,
             'Ask': [(ask_price1, ask_volume1, order_num), ...],
             'Bid': [(bid_price1, bid_volume1, order_num), ...]}

        'Ask' is the sell side and 'Bid' the buy side; each tuple means
        (order price, order volume, number of orders).
        When ret != RET_OK, data is an error string.
    """
    # reject anything that is not a string stock code
    if code is None or is_str(code) is False:
        error_str = ERROR_STR_PREFIX + "the type of code param is wrong"
        return RET_ERROR, error_str
    # build a synchronous query from the OrderBookQuery pack/unpack handlers
    query_processor = self._get_sync_query_processor(OrderBookQuery.pack_req, OrderBookQuery.unpack_rsp, )
    kargs = {"code": code, "conn_id": self.get_sync_conn_id()}
    ret_code, msg, orderbook = query_processor(**kargs)
    if ret_code == RET_ERROR:
        return ret_code, msg
    return RET_OK, orderbook
|
def make_application_private(application_id, sar_client=None):
    """Set the application to be private.

    Privacy is enforced by replacing the application's sharing policy
    with an empty statement list.

    :param application_id: The Amazon Resource Name (ARN) of the application
    :type application_id: str
    :param sar_client: The boto3 client used to access SAR; created on
        demand when not supplied
    :type sar_client: boto3.client
    :raises ValueError: when no application id is given
    """
    if not application_id:
        raise ValueError('Require application id to make the app private')
    client = sar_client or boto3.client('serverlessrepo')
    client.put_application_policy(ApplicationId=application_id, Statements=[])
|
def fetch_data_table(api_key, show_progress, retries):
    """Fetch the WIKI Prices data table from Quandl, retrying on failure.

    Raises ValueError once all *retries* attempts have failed.
    """
    for _ in range(retries):
        try:
            if show_progress:
                log.info('Downloading WIKI metadata.')
            metadata = pd.read_csv(format_metadata_url(api_key))
            # The metadata CSV points at the zipped prices table.
            table_url = metadata.loc[0, 'file.link']
            if show_progress:
                raw_file = download_with_progress(table_url, chunk_size=ONE_MEGABYTE, label="Downloading WIKI Prices table from Quandl")
            else:
                raw_file = download_without_progress(table_url)
            return load_data_table(file=raw_file, index_col=None, show_progress=show_progress, )
        except Exception:
            log.exception("Exception raised reading Quandl data. Retrying.")
    # Reaching this point means every attempt failed (a success returns
    # from inside the loop).
    raise ValueError("Failed to download Quandl data after %d attempts." % (retries))
|
def revoke(self, cidr_ip=None, ec2_group=None):
    """Revoke access previously granted to a CIDR range or EC2 SecurityGroup.

    You need to pass in either a CIDR block or an EC2 SecurityGroup
    from which to revoke access.

    @type cidr_ip: string
    @param cidr_ip: A valid CIDR IP range to revoke
    @type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup>`
    @rtype: bool
    @return: True if successful.
    """
    if isinstance(ec2_group, SecurityGroup):
        # Revoke by EC2 security group name + owner id.
        return self.connection.revoke_dbsecurity_group(
            self.name,
            ec2_security_group_name=ec2_group.name,
            ec2_security_group_owner_id=ec2_group.owner_id,
        )
    # Otherwise revoke by CIDR IP range.
    return self.connection.revoke_dbsecurity_group(self.name, cidr_ip=cidr_ip)
|
def _tag_and_field_maker(self, event):
    """Split a log event into InfluxDB tags and fields.

    The event's ``data`` sub-dict is flattened (keys prefixed with
    ``data.``); numeric values and ``MarkValue`` instances become
    fields, every other flattened value becomes a tag. All remaining
    top-level event keys not listed in ``EXCLUDE_TAGS`` also become
    tags.

    :returns: a ``(tags, fields)`` pair of dicts
    """
    flat_data = flatten_dict({'data': event.pop('data')})
    # Everything left in the event (outside the excluded set) is a tag.
    tags = {k: event[k] for k in event if k not in self.EXCLUDE_TAGS}
    fields = {}
    for key, value in flat_data.items():
        if is_number(value) or isinstance(value, MarkValue):
            fields[key] = value
        else:
            tags[key] = value
    return tags, fields
|
def compute_edges(edges):
    """Compute bin edges as midpoints of the given bin centers.

    The first and last boundaries are placed equidistant from the first
    and last midpoints respectively, so ``len(result) == len(edges) + 1``.
    """
    centers = np.asarray(edges)
    if centers.dtype.kind == 'i':
        # Promote integers to float so midpoint arithmetic is exact.
        centers = centers.astype('f')
    mids = (centers[:-1] + centers[1:]) / 2.0
    # Outer boundaries mirror the first/last midpoint about the end centers.
    first = 2 * centers[0] - mids[0]
    last = 2 * centers[-1] - mids[-1]
    return np.concatenate([[first], mids, [last]])
|
def check_settings(self, settings):
    """Check the settings info.

    :param settings: Dict with settings data
    :type settings: dict

    :returns: Errors found on the settings data
    :rtype: list
    """
    errors = []
    # Report malformed input as an error instead of crashing: the previous
    # ``assert isinstance(settings, dict)`` made this 'invalid_syntax'
    # branch unreachable for non-dict input (AssertionError fired first)
    # and disappeared entirely under ``python -O``.
    if not isinstance(settings, dict) or len(settings) == 0:
        errors.append('invalid_syntax')
    else:
        if not self.__sp_validation_only:
            errors += self.check_idp_settings(settings)
        sp_errors = self.check_sp_settings(settings)
        errors += sp_errors
    return errors
|
def isreal(obj):
    """Test if the argument is a real number (float or integer).

    ``None`` and booleans are explicitly rejected, even though ``bool``
    is a subclass of ``int``.

    :param obj: Object
    :type obj: any
    :rtype: boolean
    """
    if obj is None or isinstance(obj, bool):
        return False
    return isinstance(obj, (int, float))
|
def _parse(encoded_data, data_len, pointer=0, lengths_only=False):
    """Parses a byte string into component parts

    :param encoded_data:
        A byte string that contains BER-encoded data

    :param data_len:
        The integer length of the encoded data

    :param pointer:
        The index in the byte string to parse from

    :param lengths_only:
        A boolean to cause the call to return a 2-element tuple of the integer
        number of bytes in the header and the integer number of bytes in the
        contents. Internal use only.

    :return:
        A 2-element tuple:
         - 0: A tuple of (class_, method, tag, header, content, trailer)
         - 1: An integer indicating how many bytes were consumed
    """
    # A BER value needs at least an identifier octet and a length octet.
    if data_len < pointer + 2:
        raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (2, data_len - pointer))
    start = pointer
    # Identifier octet: bits 8-7 are the class, bit 6 the constructed
    # flag, bits 5-1 the tag number.
    first_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
    pointer += 1
    tag = first_octet & 31
    # High tag number form: all five tag bits set means the tag is encoded
    # in subsequent octets, base 128, using the 8th bit as continuation
    # indicator.
    if tag == 31:
        tag = 0
        while True:
            num = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
            pointer += 1
            tag *= 128
            tag += num & 127
            if num >> 7 == 0:
                break
    length_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
    pointer += 1
    if length_octet >> 7 == 0:
        # Short form: the low 7 bits are the content length itself.
        if lengths_only:
            return (pointer, pointer + (length_octet & 127))
        contents_end = pointer + (length_octet & 127)
    else:
        # Long form: the low 7 bits give the number of subsequent length
        # octets (big-endian unsigned); zero octets means indefinite length.
        length_octets = length_octet & 127
        if length_octets:
            pointer += length_octets
            contents_end = pointer + int_from_bytes(encoded_data[pointer - length_octets:pointer], signed=False)
            if lengths_only:
                return (pointer, contents_end)
        else:
            # To properly parse indefinite length values, we need to scan forward
            # parsing headers until we find a value with a length of zero. If we
            # just scanned looking for \x00\x00, nested indefinite length values
            # would not work.
            contents_end = pointer
            # Unfortunately we need to understand the contents of the data to
            # properly scan forward, which bleeds some representation info into
            # the parser. This condition handles the unused bits byte in
            # constructed bit strings.
            if tag == 3:
                contents_end += 1
            while contents_end < data_len:
                sub_header_end, contents_end = _parse(encoded_data, data_len, contents_end, lengths_only=True)
                if contents_end == sub_header_end and encoded_data[contents_end - 2:contents_end] == b'\x00\x00':
                    # A zero-length value: the end-of-contents marker.
                    break
            if lengths_only:
                return (pointer, contents_end)
            if contents_end > data_len:
                raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (contents_end, data_len))
            # The content excludes the trailing end-of-contents octets,
            # which are returned separately as the trailer.
            return ((first_octet >> 6, (first_octet >> 5) & 1, tag, encoded_data[start:pointer], encoded_data[pointer:contents_end - 2], b'\x00\x00'), contents_end)
    if contents_end > data_len:
        raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (contents_end, data_len))
    # Definite-length value: no trailer octets.
    return ((first_octet >> 6, (first_octet >> 5) & 1, tag, encoded_data[start:pointer], encoded_data[pointer:contents_end], b''), contents_end)
|
def submit_mult_calcs(calc_suite_specs, exec_options=None):
    """Generate and execute all specified computations.

    Once the calculations are prepped and submitted for execution, any
    calculation that triggers any exception or error is skipped, and the rest
    of the calculations proceed unaffected. This prevents an error in a single
    calculation from crashing a large suite of calculations.

    Parameters
    ----------
    calc_suite_specs : dict
        The specifications describing the full set of calculations to be
        generated and potentially executed. Accepted keys and their values:

        library : module or package comprising an aospy object library
            The aospy object library for these calculations.
        projects : list of aospy.Proj objects
            The projects to permute over.
        models : 'all', 'default', or list of aospy.Model objects
            The models to permute over. If 'all', use all models in the
            ``models`` attribute of each ``Proj``. If 'default', use all
            models in the ``default_models`` attribute of each ``Proj``.
        runs : 'all', 'default', or list of aospy.Run objects
            The runs to permute over. If 'all', use all runs in the
            ``runs`` attribute of each ``Model``. If 'default', use all
            runs in the ``default_runs`` attribute of each ``Model``.
        variables : list of aospy.Var objects
            The variables to be calculated.
        regions : 'all' or list of aospy.Region objects
            The region(s) over which any regional reductions will be
            performed. If 'all', use all regions in the ``regions``
            attribute of each ``Proj``.
        date_ranges : 'default' or a list of tuples
            The range of dates (inclusive) over which to perform
            calculations. If 'default', use the ``default_start_date`` and
            ``default_end_date`` attribute of each ``Run``. Else provide a
            list of tuples, each containing a pair of start and end dates,
            such as ``date_ranges=[(start, end)]`` where ``start`` and
            ``end`` are each ``datetime.datetime`` objects, partial
            datetime strings (e.g. '0001'), ``np.datetime64`` objects, or
            ``cftime.datetime`` objects.
        output_time_intervals : {'ann', season-string, month-integer}
            The sub-annual time interval over which to aggregate:

            - 'ann': Annual mean
            - season-string: E.g. 'JJA' for June-July-August
            - month-integer: 1 for January, 2 for February, etc. Each one
              is a separate reduction, e.g. [1, 2] would produce averages
              (or other specified time reduction) over all Januaries, and
              separately over all Februaries.
        output_time_regional_reductions : list of reduction string identifiers
            Unlike most other keys, these are not permuted over when
            creating the :py:class:`aospy.Calc` objects that execute the
            calculations; each :py:class:`aospy.Calc` performs all of the
            specified reductions. Accepted string identifiers are:

            - Gridpoint-by-gridpoint output: 'av' (time-average),
              'std' (temporal standard deviation), 'ts' (time-series)
            - Averages over each region specified via `region`:
              'reg.av', 'reg.std', 'reg.ts' (analogous to 'av', 'std', 'ts')
        output_vertical_reductions : {None, 'vert_av', 'vert_int'}, optional
            How to reduce the data vertically: no reduction (None),
            mass-weighted vertical average ('vert_av'), or mass-weighted
            vertical integral ('vert_int').
        input_time_intervals : {'annual', 'monthly', 'daily', '#hr'}
            A string specifying the time resolution of the input data. In
            '#hr' above, the '#' stands for a number, e.g. 3hr or 6hr, for
            sub-daily output. These are the suggested specifiers, but
            others may be used if they are also used by the DataLoaders
            for the given Runs.
        input_time_datatypes : {'inst', 'ts', 'av'}
            What the time axis of the input data represents:

            - 'inst': Timeseries of instantaneous values
            - 'ts': Timeseries of averages over the period of each time-index
            - 'av': A single value averaged over a date range
        input_vertical_datatypes : {False, 'pressure', 'sigma'}, optional
            The vertical coordinate system used by the input data: not
            defined vertically (False), pressure coordinates ('pressure'),
            or hybrid sigma-pressure coordinates ('sigma').
        input_time_offsets : {None, dict}, optional
            How to offset input data in time to correct for metadata
            errors: None for no offset, or a dict such as
            ``{'hours': -3}`` to offset times by -3 hours.
            See :py:meth:`aospy.utils.times.apply_time_offset`.

    exec_options : dict or None (default None)
        Options regarding how the calculations are reported, submitted, and
        saved. If None, default settings are used for all options. Currently
        supported options (each should be either ``True`` or ``False``):

        - prompt_verify: (default False) If True, print summary of
          calculations to be performed and prompt user to confirm before
          submitting for execution.
        - parallelize: (default False) If True, submit calculations in
          parallel.
        - client: distributed.Client or None (default None) The
          dask.distributed Client used to schedule computations. If None
          and parallelize is True, a LocalCluster will be started.
        - write_to_tar: (default True) If True, write results of
          calculations to .tar files, one for each :py:class:`aospy.Run`
          object. These tar files have directory structures identical to
          the standard output relative to their root directory, which is
          specified via the `tar_direc_out` argument of each Proj
          object's instantiation.

    Returns
    -------
    A list of the return values from each :py:meth:`aospy.Calc.compute` call
        If a calculation ran without error, this value is the
        :py:class:`aospy.Calc` object itself, with the results of its
        calculations saved in its ``data_out`` attribute. ``data_out`` is
        a dictionary, with the keys being the temporal-regional reduction
        identifiers (e.g. 'reg.av'), and the values being the
        corresponding result. If any error occurred during a calculation,
        the return value is None.

    Raises
    ------
    AospyException
        If the ``prompt_verify`` option is set to True and the user does not
        respond affirmatively to the prompt.
    """
    if exec_options is None:
        exec_options = dict()
    # Pop the option so it is not forwarded to _exec_calcs below.
    if exec_options.pop('prompt_verify', False):
        print(_print_suite_summary(calc_suite_specs))
        # Raises AospyException unless the user confirms.
        _user_verify()
    calc_suite = CalcSuite(calc_suite_specs)
    calcs = calc_suite.create_calcs()
    if not calcs:
        raise AospyException("The specified combination of parameters yielded zero " "calculations. Most likely, one of the parameters is " "inadvertently empty.")
    return _exec_calcs(calcs, **exec_options)
|
def from_description(cls, description, attrs):
    """Create an index object from a DynamoDB describe-table response.

    Key-schema attributes with no entry in *attrs* (the table's declared
    attribute definitions) are skipped.
    """
    projection = description["Projection"]
    hash_key = None
    range_key = None
    for schema_entry in description["KeySchema"]:
        attr_name = schema_entry["AttributeName"]
        if attr_name not in attrs:
            continue
        key_type = schema_entry["KeyType"]
        if key_type == "HASH":
            hash_key = TableField(attr_name, attrs[attr_name].data_type, key_type)
        elif key_type == "RANGE":
            range_key = TableField(attr_name, attrs[attr_name].data_type, key_type)
    throughput = description["ProvisionedThroughput"]
    return cls(
        description["IndexName"],
        projection["ProjectionType"],
        description["IndexStatus"],
        hash_key,
        range_key,
        throughput["ReadCapacityUnits"],
        throughput["WriteCapacityUnits"],
        description.get("IndexSizeBytes", 0),
        projection.get("NonKeyAttributes"),
        description,
    )
|
def handle_real_loop_comparison(self, args, target, upper_bound):
    """Handle comparison for real loops.

    Picks ``<`` or ``>`` based on the loop direction when it can be
    determined at compile time; an unknown direction conservatively
    yields ``>``.
    """
    # direction is 1 for an increasing loop, -1 for a decreasing loop and
    # 0 if it is not known at compile time
    if len(args) <= 2:
        direction = 1  # implicit step of 1
    elif isinstance(args[2], ast.Num):
        direction = 1 if int(args[2].n) > 0 else -1
    elif isinstance(args[1], ast.Num) and isinstance(args[0], ast.Num):
        direction = 1 if int(args[1].n) > int(args[0].n) else -1
    else:
        direction = 0
    operator_str = "<" if direction == 1 else ">"
    return "{} {} {}".format(target, operator_str, upper_bound)
|
def path(self, which=None):
    """Extend ``nailgun.entity_mixins.Entity.path``.

    The format of the returned path depends on the value of ``which``:

    clone
        /api/roles/:role_id/clone

    Otherwise, call ``super``.
    """
    if which != 'clone':
        return super(Role, self).path(which)
    base = super(Role, self).path(which='self')
    return '{0}/{1}'.format(base, which)
|
def _write_frame(self, data):
    """Write a frame to the PN532 with the specified data bytearray.

    :param data: command payload; must hold 1 to 254 bytes so the
        length fits in the frame's single length byte.
    """
    assert data is not None and 0 < len(data) < 255, 'Data must be array of 1 to 255 bytes.'
    # Build frame to send as:
    # - SPI data write (0x01)
    # - Preamble (0x00)
    # - Start code (0x00, 0xFF)
    # - Command length (1 byte)
    # - Command length checksum (so length + checksum == 0 mod 256)
    # - Command bytes
    # - Checksum
    # - Postamble (0x00)
    length = len(data)
    frame = bytearray(length + 8)
    frame[0] = PN532_SPI_DATAWRITE
    frame[1] = PN532_PREAMBLE
    frame[2] = PN532_STARTCODE1
    frame[3] = PN532_STARTCODE2
    frame[4] = length & 0xFF
    # presumably _uint8_add adds modulo 256, making this the two's
    # complement of length — TODO confirm
    frame[5] = self._uint8_add(~length, 1)
    frame[6:-2] = data
    # Data checksum: mod-256 sum of payload bytes seeded with 0xFF, then
    # bitwise-inverted for the checksum byte.
    checksum = reduce(self._uint8_add, data, 0xFF)
    frame[-2] = ~checksum & 0xFF
    frame[-1] = PN532_POSTAMBLE
    # Send frame: select the chip, give it 2 ms to become ready, write
    # over SPI, then deselect.
    logger.debug('Write frame: 0x{0}'.format(binascii.hexlify(frame)))
    self._gpio.set_low(self._cs)
    self._busy_wait_ms(2)
    self._spi.write(frame)
    self._gpio.set_high(self._cs)
|
def pil_image(self, fill_value=None, compute=True):
    """Return a PIL image built from the current image.

    Args:
        fill_value (int or float): Value to use for NaN null values.
            See :meth:`~trollimage.xrimage.XRImage.finalize` for more
            info.
        compute (bool): When True (the default) return a fully computed
            PIL.Image object; when False return the dask Delayed object
            that would produce it.
    """
    channels, mode = self.finalize(fill_value)
    # PIL expects (rows, cols, bands) ordering.
    arranged = channels.transpose('y', 'x', 'bands')
    delayed_img = dask.delayed(PILImage.fromarray)(np.squeeze(arranged.data), mode)
    return delayed_img.compute() if compute else delayed_img
|
def AgregarRomaneo(self, nro_romaneo, fecha_romaneo, **kwargs):
    """Add one or more romaneos (slaughter records) to the settlement.

    Each romaneo starts with an empty list of ``fardo`` (bale) entries.
    Always returns True.
    """
    nuevo = {
        'nroRomaneo': nro_romaneo,
        'fechaRomaneo': fecha_romaneo,
        'fardo': [],
    }
    self.solicitud['romaneo'].append(nuevo)
    return True
|
def display_name(self, display_name):
    """Set the display_name of this OrderFulfillmentRecipient.

    The display name of the fulfillment recipient. If provided, it
    overrides the value from the customer profile indicated by
    customer_id.

    :param display_name: The display_name of this OrderFulfillmentRecipient.
    :type: str
    :raises ValueError: if the value is ``None`` or exceeds 255 characters
    """
    # Validate before assigning so an invalid value never lands on the object.
    if display_name is None:
        raise ValueError("Invalid value for `display_name`, must not be `None`")
    if len(display_name) > 255:
        raise ValueError("Invalid value for `display_name`, length must be less than `255`")
    self._display_name = display_name
|
def set_many(self, block, update_dict):
    """Update many fields on an XBlock simultaneously.

    :param block: the block to update
    :type block: :class:`~xblock.core.XBlock`
    :param update_dict: A map of field names to their new values
    :type update_dict: dict
    """
    # dict.items() behaves identically on Python 2 and 3 for this use,
    # so the six.iteritems() compatibility shim is unnecessary.
    for key, value in update_dict.items():
        self.set(block, key, value)
|
def get_logger(name):
    """Helper function to get a (cached) colour-formatted logger."""
    try:
        return loggers[name]
    except KeyError:
        pass
    logger = logging.getLogger(name)
    logger.propagate = False
    # Two colour hashes: one derived from the name, one from a salted
    # variant, giving a distinctive per-logger colour pair.
    if supports_color():
        pre1, suf1 = hash_coloured_escapes(name)
        pre2, suf2 = hash_coloured_escapes(name + 'salt')
    else:
        pre1, suf1 = '', ''
        pre2, suf2 = '', ''
    formatter = logging.Formatter('%(levelname)s {}+{}+{} ' '%(name)s: %(message)s'.format(pre1, pre2, suf1))
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    loggers[name] = logger
    logger.once_dict = {}
    return logger
|
def export(self, storage_client, overwrite=True):
    '''a method to export all the records in collection to another platform

    :param storage_client: class object with storage client methods
    :param overwrite: [optional] boolean to overwrite records that already exist
    :return: string with exit message
    '''
    title = '%s.export' % self.__class__.__name__
    # validate the storage client exposes the full storage interface
    method_list = ['save', 'load', 'list', 'export', 'delete', 'remove', '_import', 'collection_name']
    for method in method_list:
        if not getattr(storage_client, method, None):
            from labpack.parsing.grammar import join_words
            raise ValueError('%s(storage_client=...) must be a client object with %s methods.' % (title, join_words(method_list)))
    # walk collection folder and import each file into the target client
    import os
    root_segments = self.collection_folder.split(os.sep)
    count = 0
    skipped = 0
    for file_path in self.localhost.walk(self.collection_folder):
        # record key is the path relative to the collection root,
        # normalized to forward slashes
        path_segments = file_path.split(os.sep)[len(root_segments):]
        record_key = os.path.join(*path_segments)
        record_key = record_key.replace('\\', '/')
        # read file via a context manager so the handle is closed
        # promptly (previously each open file handle was leaked)
        with open(file_path, 'rb') as record_file:
            record_data = record_file.read()
        last_modified = os.path.getmtime(file_path)
        outcome = storage_client._import(record_key, record_data, overwrite=overwrite, last_modified=last_modified)
        if outcome:
            count += 1
        else:
            skipped += 1
    # compose the exit message
    plural = '' if count == 1 else 's'
    skip_insert = ''
    if skipped > 0:
        skip_plural = '' if skipped == 1 else 's'
        skip_insert = ' %s record%s skipped to avoid overwrite.' % (str(skipped), skip_plural)
    new_folder = storage_client.collection_name
    exit_msg = '%s record%s exported to %s.%s' % (str(count), plural, new_folder, skip_insert)
    return exit_msg
|
def delete_file(self, sass_filename, sass_fileurl):
    """Delete a *.css file, but only if it has been generated through a SASS/SCSS file."""
    # Resolve the target .css path either under STATIC_ROOT or next to the
    # source file, depending on configuration.
    if self.use_static_root:
        base, _ = os.path.splitext(sass_fileurl)
        destpath = os.path.join(self.static_root, base + '.css')
    else:
        base, _ = os.path.splitext(sass_filename)
        destpath = base + '.css'
    if os.path.isfile(destpath):
        os.remove(destpath)
        self.processed_files.append(sass_filename)
        if self.verbosity > 1:
            self.stdout.write("Deleted '{0}'\n".format(destpath))
|
async def list(cls, fields: Iterable[str] = None) -> Sequence[dict]:
    '''Lists the keypair resource policies.

    You need an admin privilege for this operation.

    :param fields: optional iterable of GraphQL field names to fetch;
        a sensible default set is used when omitted.
    '''
    if fields is None:
        fields = (
            'name',
            'created_at',
            'total_resource_slots',
            'max_concurrent_sessions',
            'max_vfolder_count',
            'max_vfolder_size',
            'idle_timeout',
        )
    # Substitute the requested field names into the GraphQL query template.
    query = 'query { keypair_resource_policies { $fields }}'
    query = query.replace('$fields', ' '.join(fields))
    request = Request(cls.session, 'POST', '/admin/graphql')
    request.set_json({'query': query})
    async with request.fetch() as resp:
        payload = await resp.json()
        return payload['keypair_resource_policies']
|
def with_cardinality(cls, cardinality, converter, pattern=None, listsep=','):
    """Creates a type converter for the specified cardinality
    by using the type converter for T.

    :param cardinality: Cardinality to use (0..1, 0..*, 1..*).
    :param converter: Type converter (function) for data type T.
    :param pattern: Regexp pattern for an item (=converter.pattern).
    :param listsep: Separator used between items in the many cases.
    :return: type-converter for optional<T> (T or None).
    """
    # Exactly-one needs no wrapping at all.
    if cardinality is Cardinality.one:
        return converter
    # Dispatch to the matching builder, e.g. with_zero_or_one / with_zero_or_more.
    build = getattr(cls, "with_%s" % cardinality.name)
    if cardinality is Cardinality.zero_or_one:
        return build(converter, pattern)
    # Many cases (0..*, 1..*) additionally need the list separator.
    return build(converter, pattern, listsep=listsep)
|
def query(self, ns, selector='*'):
    """Query the label store for labels

    :param ns: Label namespace (`bind_pwd` for example)
    :type ns: str
    :param selector: Target selector (`test` or `test.guest` for example)
    :type selector: str
    :return: list of (key, value) pairs matching the namespace/selector
    """
    # Fully-qualified key prefix for this namespace: "<PREFIX>.<ns>".
    q, r = HicaLabelStore.PREFIX + '.' + ns, []
    # NOTE(review): assumes self.items is an iterable of (key, value) pairs
    # — confirm whether it is an attribute or should be a method call.
    for (key, value) in self.items:
        # An empty/falsy selector matches the namespace key itself exactly.
        if not selector and key == q:
            r.append((key, value))
        # Keys strictly below the namespace prefix are matched against
        # "." + selector as a regex; with the default '*' this becomes ".*",
        # i.e. match everything under the namespace.
        if key.startswith(q) and key != q:
            sub = key[len(q):]
            m = re.match('.' + selector, sub)
            if m:
                r.append((key, value))
    return r
|
def is_format_selected(image_format, formats, progs):
    """Determine if the image format is selected by command line arguments."""
    # A format counts as selected when it survives intersection with the
    # globally-configured formats AND its program mode was chosen.
    selected_formats = formats & Settings.formats
    prog_selected = _is_program_selected(progs)
    return (image_format in selected_formats) and prog_selected
|
async def open_async(self):
    """Open the Sender using the client's shared connection.

    If the handler has previously been redirected, the redirect
    address is used as the new target before the handler is opened.
    Yields to the event loop until the underlying uAMQP client
    reports that it is ready to send.
    """
    self.running = True
    if self.redirected:
        # A prior redirect response supplies the authoritative address.
        self.target = self.redirected.address
    self._handler = SendClientAsync(
        self.target,
        auth=self.client.get_auth(),
        debug=self.client.debug,
        msg_timeout=self.timeout,
        error_policy=self.retry_policy,
        keep_alive_interval=self.keep_alive,
        client_name=self.name,
        properties=self.client.create_properties(),
        loop=self.loop)
    await self._handler.open_async()
    # Poll (without blocking the loop) until the client is ready.
    while not await self._handler.client_ready_async():
        await asyncio.sleep(0.05)
|
def hexdump(src, length=16, sep='.'):
    """Print a canonical hexdump of ``src`` to stdout.

    Based on the hexdump function by sbz and 7h3rAm on Github:
    (https://gist.github.com/7h3rAm/5603718).

    :param src: Source, the string (or bytes) to be shown in hexadecimal format
    :param length: Number of bytes to print in one row
    :param sep: Unprintable characters representation
    :return: None (the dump is printed)
    """
    # Translation table: printable single characters map to themselves,
    # everything else maps to ``sep``.
    filtr = ''.join(chr(x) if len(repr(chr(x))) == 3 else sep for x in range(256))

    def _byte(item):
        # Items of a bytes object are already ints on Python 3; str items
        # need ord().  This makes the function accept both input types.
        return item if isinstance(item, int) else ord(item)

    lines = []
    # range() instead of xrange(): xrange does not exist on Python 3 and
    # would raise NameError there.
    for offset in range(0, len(src), length):
        chunk = src[offset:offset + length]
        hexstring = ' '.join("%02x" % _byte(item) for item in chunk)
        if len(hexstring) > 24:
            # Extra gap after the first 8 byte pairs for readability.
            hexstring = "%s %s" % (hexstring[:24], hexstring[24:])
        printable = ''.join(
            filtr[_byte(item)] if _byte(item) <= 127 else sep for item in chunk)
        lines.append(" %02x: %-*s |%s|\n" % (offset, length * 3, hexstring, printable))
    print(''.join(lines))
|
def resizeEvent(self, event):
    """Updates the position of the additional buttons when this widget \
    resizes.

    :param event: <QResizeEvent>
    """
    super(XTabWidget, self).resizeEvent(event)
    # Reposition the extra corner buttons to match the new geometry.
    self.adjustButtons()
|
def get_usage(self):
    """Get fitness locations and their current usage.

    Scrapes the fitness page and returns a list of dicts with keys
    ``name``, ``open``, ``count``, ``updated`` (ISO-8601, US/Eastern)
    and ``percent``.
    """
    resp = requests.get(FITNESS_URL, timeout=30)
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, "html5lib")
    eastern = pytz.timezone('US/Eastern')
    output = []
    for item in soup.findAll("div", {"class": "barChart"}):
        data = [x.strip() for x in item.get_text("\n").strip().split("\n")]
        data = [x for x in data if x]
        # Strip trailing "Hours - OPEN/CLOSED" decorations from the name.
        # BUG FIX: re.I must be passed via the ``flags`` keyword; as the
        # fourth positional argument it was silently interpreted as the
        # ``count`` argument of re.sub.
        name = re.sub(r"\s*(Hours)?\s*-?\s*(CLOSED|OPEN)?$", "", data[0], flags=re.I).strip()
        output.append({
            "name": name,
            "open": "Open" in data[1],
            "count": int(data[2].rsplit(" ", 1)[-1]),
            # Source timestamp has a fixed 8-char prefix before the date.
            "updated": eastern.localize(
                datetime.datetime.strptime(data[3][8:].strip(), '%m/%d/%Y %I:%M %p')
            ).isoformat(),
            # e.g. "42%" -> 42
            "percent": int(data[4][:-1]),
        })
    return output
|
def validate_deserialize(rawmsg, requrl=None, check_expiration=True, decode_payload=True, algorithm_name=DEFAULT_ALGO):
    """Validate a JWT compact serialization and return the header and
    payload if the signature is good.

    If check_expiration is False, the payload will be accepted even if
    expired.

    If decode_payload is True then this function will attempt to decode
    it as JSON, otherwise the raw payload will be returned. Note that
    it is always decoded from base64url.
    """
    assert algorithm_name in ALGORITHM_AVAILABLE
    algo = ALGORITHM_AVAILABLE[algorithm_name]

    # Compact serialization: header.payload.signature, all non-empty.
    parts = rawmsg.split('.')
    if len(parts) != 3 or not all(parts):
        raise InvalidMessage('must contain 3 non-empty segments')
    header_b64, payload_b64, signature_b64 = parts

    # Any decode/parse failure is surfaced uniformly as InvalidMessage.
    try:
        signature = base64url_decode(signature_b64.encode('utf8'))
        raw_payload = base64url_decode(payload_b64.encode('utf8'))
        raw_header = base64url_decode(header_b64.encode('utf8'))
        header = json.loads(raw_header.decode('utf8'))
        payload = json.loads(raw_payload.decode('utf8')) if decode_payload else raw_payload
    except Exception as err:
        raise InvalidMessage(str(err))

    # The signature covers the raw (still base64url) header and payload.
    signing_input = '{}.{}'.format(header_b64, payload_b64)
    try:
        valid = _verify_signature(signing_input, header, signature, algo)
    except Exception as err:
        raise InvalidMessage('failed to verify signature: {}' .format(err))
    if not valid:
        return None, None

    # Claims (expiration, audience URL) are only checkable on decoded JSON.
    if decode_payload:
        _verify_payload(payload, check_expiration, requrl)
    return header, payload
|
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
        size (Optional[int]): number of bytes to read, where None is all
            remaining data.

    Returns:
        bytes: data read.

    Raises:
        IOError: if the read failed.
        OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError('Invalid current offset value less than zero.')
    # The SleuthKit is not POSIX compliant in its read behavior. Therefore
    # pytsk3 will raise an IOError if the read offset is beyond the data
    # size, so return EOF explicitly here instead.
    if self._current_offset >= self._size:
        return b''
    # Clamp the requested size to the remaining data.
    if size is None or self._current_offset + size > self._size:
        size = self._size - self._current_offset
    # When a specific TSK attribute is selected (e.g. an ADS), read from it;
    # otherwise read from the default data stream of the file.
    if self._tsk_attribute:
        data = self._tsk_file.read_random(self._current_offset, size, self._tsk_attribute.info.type, self._tsk_attribute.info.id)
    else:
        data = self._tsk_file.read_random(self._current_offset, size)
    # It is possible that the returned data size is not the same as the
    # requested data size. At this layer we don't care and this discrepancy
    # should be dealt with on a higher layer if necessary.
    self._current_offset += len(data)
    return data
|
def get_conn(opts, profile):
    '''Return a client object for accessing consul'''
    # Merge configuration layers: opts overrides pillar, which overrides
    # the master config embedded in the pillar.
    opts_pillar = opts.get('pillar', {})
    merged = {}
    merged.update(opts_pillar.get('master', {}))
    merged.update(opts_pillar)
    merged.update(opts)

    # A profile, when given, names a sub-dict of the merged configuration.
    conf = merged.get(profile, {}) if profile else merged

    # Collect every "consul.<key>" option as a keyword argument.
    params = {key.split('.')[1]: value
              for key, value in conf.items()
              if key.startswith('consul.')}

    # Resolve a templated datacenter value against the active pillar env.
    if 'dc' in params:
        pillarenv = merged.get('pillarenv') or 'base'
        params['dc'] = _resolve_datacenter(params['dc'], pillarenv)

    if not consul:
        raise CommandExecutionError('(unable to import consul, '
                                    'module most likely not installed. Download python-consul '
                                    'module and be sure to import consul)')
    # Sanity check. ACL Tokens are supported on python-consul 0.4.7 onwards only.
    if consul.__version__ < '0.4.7' and params.get('target'):
        params.pop('target')
    return consul.Consul(**params)
|
def comment_form(context, object):
    """Usage:
        {% comment_form obj as comment_form %}

    Reads the `user` var out of the context to know if the form should be
    for an auth'd user or not.
    """
    current_user = context.get("user")
    # The template context may override the form class; fall back to CommentForm.
    form_class = context.get("form", CommentForm)
    return form_class(obj=object, user=current_user)
|
def lunch(rest):
    "Pick where to go to lunch"
    area = rest.strip()
    choices_map = pmxbot.config.lunch_choices
    # No area given: list the known areas.
    if not area:
        return "Give me an area and I'll pick a place: (%s)" % (', '.join(list(choices_map)))
    # Unknown area: same list, different message.
    if area not in choices_map:
        return "I didn't recognize that area; here's what i have: (%s)" % (', '.join(list(choices_map)))
    return random.choice(choices_map[area])
|
def add_parens(line, maxline, indent, statements=statements, count=count):
    """Attempt to add parentheses around the line
    in order to make it splittable.

    :param line: tokenized line as a list of strings (mutated in place)
    :param maxline: maximum allowed line width
    :param indent: current indentation width
    :param statements: statement-keyword tokens (bound at definition time)
    :param count: width-counting function (bound at definition time)
    :return: the (possibly re-grouped) list of tokens
    """
    # Statements like `return x` become `return (x)` so the value can wrap.
    if line[0] in statements:
        index = 1
        if not line[0].endswith(' '):
            # Keyword token is followed by a separate space token.
            index = 2
            assert line[1] == ' '
        line.insert(index, '(')
        # Keep a trailing ':' outside the parentheses.
        if line[-1] == ':':
            line.insert(-1, ')')
        else:
            line.append(')')
    # That was the easy stuff. Now for assignments.
    groups = list(get_assign_groups(line))
    if len(groups) == 1:
        # Not an assignment -- nothing more to wrap.
        return line
    counts = list(count(x) for x in groups)
    didwrap = False
    # If the LHS is large, wrap it first
    if sum(counts[:-1]) >= maxline - indent - 4:
        for group in groups[:-1]:
            didwrap = False
            # Only want to know about last group
            if len(group) > 1:
                group.insert(0, '(')
                group.insert(-1, ')')
                didwrap = True
    # Might not need to wrap the RHS if wrapped the LHS
    if not didwrap or counts[-1] > maxline - indent - 10:
        groups[-1].insert(0, '(')
        groups[-1].append(')')
    # Flatten the groups back into a single token list.
    return [item for group in groups for item in group]
|
def gunzip(input_gzip_file, block_size=1024):
    """Gunzips the input file to the same directory

    :param input_gzip_file: File to be gunzipped (must end in ``.gz``)
    :param block_size: number of bytes to read per iteration
    :return: path to the gunzipped file
    :rtype: str
    """
    assert os.path.splitext(input_gzip_file)[1] == '.gz'
    assert is_gzipfile(input_gzip_file)
    # gzip.open yields bytes, so the output must be opened in binary mode
    # ('wb'); the old text-mode 'w' raised TypeError on Python 3.
    with gzip.open(input_gzip_file, 'rb') as infile:
        with open(os.path.splitext(input_gzip_file)[0], 'wb') as outfile:
            while True:
                block = infile.read(block_size)
                # `if not block` works for both b'' and '' — the old
                # `block == ''` comparison never matched bytes and looped
                # forever on Python 3.
                if not block:
                    break
                outfile.write(block)
    return outfile.name
|
def restore(self, state):
    """Restore the contents of this virtual stream walker.

    Args:
        state (dict): The previously serialized state.

    Raises:
        ArgumentError: If the serialized state does not have
            a matching selector.
    """
    stored_selector = DataStreamSelector.FromString(state.get(u'selector'))
    # The serialized selector must match ours exactly.
    if self.selector != stored_selector:
        raise ArgumentError("Attempted to restore an InvalidStreamWalker with a different selector",
                            selector=self.selector, serialized_data=state)
    # Only states tagged as 'invalid' belong to this walker type.
    if state.get(u'type') != u'invalid':
        raise ArgumentError("Invalid serialized state for InvalidStreamWalker",
                            serialized_data=state)
|
def _cursor_pb(cursor_pair):
    """Convert a cursor pair to a protobuf.

    If ``cursor_pair`` is :data:`None`, just returns :data:`None`.

    Args:
        cursor_pair (Optional[Tuple[list, bool]]): Two-tuple of
            * a list of field values.
            * a ``before`` flag

    Returns:
        Optional[google.cloud.firestore_v1beta1.types.Cursor]: A
        protobuf cursor corresponding to the values.
    """
    if cursor_pair is None:
        return None
    values, before = cursor_pair
    # Encode each plain field value into its protobuf Value form.
    encoded = [_helpers.encode_value(item) for item in values]
    return query_pb2.Cursor(values=encoded, before=before)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.