signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def inspect_edge(G: AnalysisGraph, source: str, target: str):
    """'Drill down' into an edge in the analysis graph and inspect its
    provenance.

    Prints a table of the influence statements attached to the edge.

    Args:
        G: Analysis graph containing the edge.
        source: Name of the source node.
        target: Name of the target node.
    """
    statements = G[source][target]["InfluenceStatements"]
    return create_statement_inspection_table(statements)
async def cancellable_wait(self, *awaitables: Awaitable[_R], timeout: float = None) -> _R:
    """Wait for the first awaitable to complete, unless we timeout or the
    token is triggered.

    Returns the result of the first awaitable to complete.

    Raises TimeoutError if we timeout or
    `~cancel_token.exceptions.OperationCancelled` if the cancel token is
    triggered.

    All pending futures are cancelled before returning.
    """
    # Race the caller's awaitables against this token's own wait() coroutine,
    # so a token trigger interrupts the wait like any other completion.
    futures = [asyncio.ensure_future(a, loop=self.loop) for a in awaitables + (self.wait(),)]
    try:
        done, pending = await asyncio.wait(
            futures,
            timeout=timeout,
            return_when=asyncio.FIRST_COMPLETED,
            loop=self.loop,
        )
    except asyncio.futures.CancelledError:
        # Since we use return_when=asyncio.FIRST_COMPLETED above, we can be
        # sure none of our futures will be done here, so we don't need to
        # check if any is done before cancelling.
        for future in futures:
            future.cancel()
        raise
    for task in pending:
        task.cancel()
    if not done:
        # asyncio.wait() returned with nothing completed: the timeout expired.
        raise TimeoutError()
    if self.triggered_token is not None:
        # We've been asked to cancel so we don't care about our future, but we
        # must consume its exception or else asyncio will emit warnings.
        for task in done:
            task.exception()
        raise OperationCancelled("Cancellation requested by {} token".format(self.triggered_token))
    return done.pop().result()
def on_parent_exit(signame):
    """Return a function to be run in a child process which will trigger
    SIGNAME to be sent when the parent process dies.
    """
    signum = getattr(signal, signame)

    def set_parent_exit_signal():
        # http://linux.die.net/man/2/prctl
        rc = cdll['libc.so.6'].prctl(PR_SET_PDEATHSIG, signum)
        if rc != 0:
            raise PrCtlError('prctl failed with error code %s' % rc)

    return set_parent_exit_signal
def run_workload(database, keys, parameters):
    """Runs workload against the database.

    Spawns ``num_worker`` threads that execute a weighted mix of operations,
    then aggregates the per-thread latencies.
    """
    cumulative_weight = 0.0
    weights = []
    operations = []
    latencies_ms = {}
    for operation in OPERATIONS:
        w = float(parameters[operation])
        if w <= 0.0:
            continue
        cumulative_weight += w
        op_code = operation.split('proportion')[0]
        operations.append(op_code)
        # Store the running total so workers can do weighted selection.
        weights.append(cumulative_weight)
        latencies_ms[op_code] = []

    workers = []
    start = timeit.default_timer()
    for _ in range(int(parameters['num_worker'])):
        worker = WorkloadThread(database, keys, parameters, cumulative_weight, weights, operations)
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()
    end = timeit.default_timer()

    # Merge each worker's latency samples into the shared buckets.
    for worker in workers:
        per_thread = worker.latencies_ms()
        for op_code in latencies_ms:
            latencies_ms[op_code].extend(per_thread[op_code])
    aggregate_metrics(latencies_ms, (end - start) * 1000.0, parameters['num_bucket'])
def a(self, **kwargs):
    '''Returns the lattice parameter, a, in Angstroms at a given
    temperature, `T`, in Kelvin (default: 300 K).'''
    temperature = kwargs.get('T', 300.)
    # Linear thermal expansion away from the 300 K reference value.
    delta_T = temperature - 300.
    return self.a_300K(**kwargs) + self.thermal_expansion(**kwargs) * delta_T
def files_info(self, area_uuid, file_list):
    """Get information about files.

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :param list file_list: The names of the files in the Upload Area about
        which we want information
    :return: an array of file information dicts
    :rtype: list of dicts
    :raises UploadApiException: if information could not be obtained
    """
    endpoint = "/area/{uuid}/files_info".format(uuid=area_uuid)
    # URL-quote each name before shipping the list in the request body.
    quoted_names = [urlparse.quote(name) for name in file_list]
    response = self._make_request('put', path=endpoint, json=quoted_names)
    return response.json()
def unlock(self, session):
    """Relinquishes a lock for the specified resource.

    Corresponds to viUnlock function of the VISA library.

    :param session: Unique logical identifier to a session.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Unknown session handles map to an invalid-object status rather
    # than an exception.
    if session not in self.sessions:
        return StatusCode.error_invalid_object
    return self.sessions[session].unlock()
def haversine(px, py, r=r_mm):
    '''Calculate the haversine distance between two points
    defined by (lat, lon) tuples.

    Args:
        px ((float, float)): lat/long position 1
        py ((float, float)): lat/long position 2
        r (float): Radius of sphere

    Returns:
        (float): Distance in the same unit as ``r`` (mm by default).
    '''
    lat1, lon1 = px
    lat2, lon2 = py
    # Convert to radians; work with the deltas directly.
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    phi1 = math.radians(lat1)
    phi2 = math.radians(lat2)
    h = math.sin(dphi / 2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(dlam / 2) ** 2
    return 2 * r * math.asin(math.sqrt(h))
def get_results(self, **kwargs):
    """Returns :class:`NodeResults` instance.

    Subclasses should extend this method (if needed) by adding
    specialized code that performs some kind of post-processing.
    """
    # Guard: the process must have been waited on ...
    if self.returncode is None:
        raise self.Error("return code is None, you should call wait, communicate or poll")
    # ... and the task must have reached the DONE state.
    if self.status is None or self.status < self.S_DONE:
        raise self.Error("Task is not completed")
    return self.Results.from_node(self)
def get_authorize_url(self, **params):
    '''Returns a formatted authorize URL.

    :param \\**params: Additional keyworded arguments to be added to the
        request querystring.
    :type \\**params: dict
    '''
    signed_query = self.session_obj.sign(
        self.authorize_url, self.app_id, self.app_secret, **params)
    return self.authorize_url + '?' + signed_query
def alterar(self, id_tipo_acesso, protocolo):
    """Edit access type by its identifier.

    :param id_tipo_acesso: Access type identifier.
    :param protocolo: Protocol.
    :return: None
    :raise ProtocoloTipoAcessoDuplicadoError: Protocol already exists.
    :raise InvalidParameterError: Protocol value is invalid or none.
    :raise TipoAcessoNaoExisteError: Access type doesn't exist.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    if not is_valid_int_param(id_tipo_acesso):
        raise InvalidParameterError(u'Access type id is invalid or was not informed.')
    payload = {'protocolo': protocolo}
    url = 'tipoacesso/' + str(id_tipo_acesso) + '/'
    code, xml = self.submit({'tipo_acesso': payload}, 'PUT', url)
    return self.response(code, xml)
def drp_load(package, resource, confclass=None):
    """Load the DRPS from a resource file.

    Reads the raw resource bytes from *package* and delegates parsing to
    ``drp_load_data``.
    """
    raw = pkgutil.get_data(package, resource)
    return drp_load_data(package, raw, confclass=confclass)
def block_splitter(self, sources, weight=get_weight, key=lambda src: 1):
    """
    :param sources: a list of sources
    :param weight: a weight function (default .weight)
    :param key: None or 'src_group_id'
    :returns: an iterator over blocks of sources
    """
    num_tasks = self.oqparam.concurrent_tasks or 1
    maxweight = self.csm.get_maxweight(weight, num_tasks, source.MINWEIGHT)
    # Log the chosen weight once per calculator instance.
    if not hasattr(self, 'logged'):
        if maxweight == source.MINWEIGHT:
            logging.info('Using minweight=%d', source.MINWEIGHT)
        else:
            logging.info('Using maxweight=%d', maxweight)
        self.logged = True
    return general.block_splitter(sources, maxweight, weight, key)
def maximum_weighted_independent_set_qubo(G, weight=None, lagrange=2.0):
    """Return the QUBO with ground states corresponding to a maximum
    weighted independent set.

    Parameters
    ----------
    G : NetworkX graph
    weight : string, optional (default None)
        If None, every node has equal weight. If a string, use this node
        attribute as the node weight. A node without this attribute is
        assumed to have max weight.
    lagrange : optional (default 2)
        Lagrange parameter to weight constraints (no edges within set)
        versus objective (largest set possible).

    Returns
    -------
    QUBO : dict
        The QUBO with ground states corresponding to a maximum weighted
        independent set.

    Examples
    --------
    >>> from dwave_networkx.algorithms.independent_set import maximum_weighted_independent_set_qubo
    >>> G = nx.path_graph(3)
    >>> Q = maximum_weighted_independent_set_qubo(G, weight='weight', lagrange=2.0)
    >>> Q[(0, 0)]
    -1.0
    >>> Q[(1, 1)]
    -1.0
    >>> Q[(0, 1)]
    2.0
    """
    # Empty QUBO for an empty graph.
    if not G:
        return {}
    # Each node n gets a binary variable v_n, with v_n = 1 iff n is in the
    # independent set S.  Diagonal (linear) terms bias each node toward
    # membership in proportion to its weight, scaled to a maximum magnitude
    # of 1; negative weights are clamped to 0.
    node_weight = dict(G.nodes(data=weight, default=1))
    scale = max(node_weight.values())
    Q = {(v, v): min(-node_weight[v] / scale, 0.0) for v in G}
    # Off-diagonal terms penalize selecting both endpoints of any edge.
    for edge in G.edges:
        Q[edge] = lagrange
    return Q
def _merge_wf_outputs(new, cur, parallel):
    """Merge outputs for a sub-workflow, replacing variables changed in
    later steps.

    ignore_ids are those used internally in a sub-workflow but not exposed
    to subsequent steps.
    """
    replaced_ids = set()
    merged = []
    for v in new:
        entry = {
            "source": v["id"],
            "id": "%s" % get_base_id(v["id"]),
            "type": v["type"],
        }
        if "secondaryFiles" in v:
            entry["secondaryFiles"] = v["secondaryFiles"]
        # An outputBinding-level secondaryFiles spec takes precedence.
        bound_sf = tz.get_in(["outputBinding", "secondaryFiles"], v)
        if bound_sf:
            entry["secondaryFiles"] = bound_sf
        replaced_ids.add(entry["id"])
        merged.append(entry)
    # Keep any previous outputs that were not superseded above.
    merged.extend(prev for prev in cur if prev["id"] not in replaced_ids)
    return merged
def as_svg_data_uri(matrix, version, scale=1, border=None, color='#000', background=None, xmldecl=False, svgns=True, title=None, desc=None, svgid=None, svgclass='segno', lineclass='qrline', omitsize=False, unit='', encoding='utf-8', svgversion=None, nl=False, encode_minimal=False, omit_charset=False):
    """Converts the matrix to a SVG data URI.

    The XML declaration is omitted by default (set ``xmldecl`` to ``True``
    to enable it), further the newline is omitted by default (set ``nl`` to
    ``True`` to enable it).

    Aside from the missing ``out`` parameter and the different ``xmldecl``
    and ``nl`` default values and the additional parameter ``encode_minimal``
    and ``omit_charset`` this function uses the same parameters as the
    usual SVG serializer.

    :param bool encode_minimal: Indicates if the resulting data URI should
        use minimal percent encoding (disabled by default).
    :param bool omit_charset: Indicates if the ``;charset=...`` should be
        omitted (disabled by default).
    :rtype: str
    """
    safe_chars = b" :/='" if encode_minimal else b""
    encode = partial(quote, safe=safe_chars)
    buff = io.BytesIO()
    write_svg(matrix, version, buff, scale=scale, color=color, background=background, border=border, xmldecl=xmldecl, svgns=svgns, title=title, desc=desc, svgclass=svgclass, lineclass=lineclass, omitsize=omitsize, encoding=encoding, svgid=svgid, unit=unit, svgversion=svgversion, nl=nl)
    charset = ';charset=' + encoding if not omit_charset else ''
    # Replace " quotes with ' and URL encode the result.
    # See also https://codepen.io/tigt/post/optimizing-svgs-in-data-uris
    payload = encode(_replace_quotes(buff.getvalue()))
    return 'data:image/svg+xml{0},{1}'.format(charset, payload)
def to_text_format(self):
    '''Format as detached DNS information as text.

    First line is the fetch timestamp; each resource record follows on
    its own line.
    '''
    lines = [self.fetch_date.strftime('%Y%m%d%H%M%S')]
    lines.extend(rr.to_text() for rr in self.resource_records)
    return '\n'.join(lines)
def load_factory(name, directory, configuration=None):
    """Load a factory and have it initialize in a particular directory.

    :param name: the name of the plugin to load
    :param directory: the directory where the factory will reside
    :param configuration: optional configuration handed to the factory
    :return: the instantiated factory
    :raises KeyError: if no entry point named *name* exists
    """
    for entry_point in pkg_resources.iter_entry_points(ENTRY_POINT):
        if entry_point.name != name:
            continue
        factory_class = entry_point.load(require=False)
        return factory_class(directory, configuration)
    raise KeyError
def convert_md_to_rst(md_path, rst_temp_path):
    """Convert the contents of a file from Markdown to reStructuredText.

    Returns the converted text as a Unicode string.

    Arguments:
      md_path: a path to a UTF-8 encoded Markdown file to convert.
      rst_temp_path: a temporary path to which to write the converted
        contents.
    """
    import subprocess
    # Pandoc uses the UTF-8 character encoding for both input and output.
    # Run pandoc with an argument list (no shell) so paths containing
    # spaces or shell metacharacters are handled safely.
    args = ["pandoc", "--write=rst", "--output=%s" % rst_temp_path, md_path]
    command = " ".join(args)
    print("converting with pandoc: %s to %s\n-->%s" % (md_path, rst_temp_path, command))
    if os.path.exists(rst_temp_path):
        os.remove(rst_temp_path)
    subprocess.call(args)
    if not os.path.exists(rst_temp_path):
        s = ("Error running: %s\n"
             " Did you install pandoc per the %s docstring?" % (command, __file__))
        sys.exit(s)
    return read(rst_temp_path)
def clearcal(vis, weightonly=False):
    """Fill the imaging and calibration columns (``MODEL_DATA``,
    ``CORRECTED_DATA``, ``IMAGING_WEIGHT``) of each measurement set with
    default values, creating the columns if necessary.

    vis (string)
      Path to the input measurement set
    weightonly (boolean)
      If true, just create the ``IMAGING_WEIGHT`` column; do not fill
      in the visibility data columns.

    If you want to reset calibration models, these days you probably want
    :func:`delmod_cli`. If you want to quickly make the columns go away, you
    probably want :func:`delcal`.

    Example::

        from pwkit.environments.casa import tasks
        tasks.clearcal('myvis.ms')
    """
    tb = util.tools.table()
    cb = util.tools.calibrater()
    # cb.open() will create the tables if they're not present, so
    # if that's the case, we don't actually need to run initcalset()
    tb.open(b(vis), nomodify=False)
    colnames = tb.colnames()
    needinit = ('MODEL_DATA' in colnames) or ('CORRECTED_DATA' in colnames)
    if 'IMAGING_WEIGHT' not in colnames:
        # Build the new column descriptor from the template; its cell shape
        # follows the trailing axis of the first DATA cell.
        c = dict(clearcal_imaging_col_tmpl)
        c['shape'] = tb.getcell(b'DATA', 0).shape[-1:]
        tb.addcols({b'IMAGING_WEIGHT': c}, clearcal_imaging_dminfo_tmpl)
    tb.close()
    if not weightonly:
        import casadef
        # NOTE(review): presumably selects the old-style visibility iterator
        # required on CASA 5.x -- confirm against pwkit/CASA docs.
        if casadef.casa_version.startswith('5.'):
            cb.setvi(old=True, quiet=False)
        cb.open(b(vis))
        if needinit:
            cb.initcalset()
        cb.close()
def refresh_frozen_cell(self, key):
    """Refreshes a frozen cell.

    Re-evaluates the cell's code at *key* and stores the new result in the
    grid's frozen-result cache.
    """
    cell_code = self.grid.code_array(key)
    new_result = self.grid.code_array._eval_cell(key, cell_code)
    self.grid.code_array.frozen_cache[repr(key)] = new_result
def apply_path(self, repo):
    """Set path to where the repo is and return original path"""
    try:
        # Rewrite repo for consistency.
        if repo.endswith('.git'):
            repo = repo.split('.git')[0]
        # Get org and repo name and the path the repo will be cloned to.
        org, name = repo.split('/')[-2:]
        path = join(self.plugins_dir, org, name)
        # Save the current path before switching.
        cwd = getcwd()
        self.ensure_dir(path)
        chdir(path)
        return (True, cwd, path)
    except Exception as e:  # pragma: no cover
        return (False, str(e))
def drop_constraints(quiet=True, stdout=None):
    """Discover and drop all constraints.

    :param quiet: unused; kept for backward compatibility.
    :param stdout: stream progress messages are written to
        (defaults to ``sys.stdout``).
    :return: None
    """
    import sys
    if stdout is None:
        # BUG FIX: the previous version dereferenced a None stdout and
        # crashed with AttributeError when called with the default.
        stdout = sys.stdout
    results, meta = db.cypher_query("CALL db.constraints()")
    # Raw string: the pattern contains backslash escapes that would
    # otherwise emit invalid-escape-sequence warnings.
    pattern = re.compile(r':(.*) \).*\.(\w*)')
    for constraint in results:
        db.cypher_query('DROP ' + constraint[0])
        match = pattern.search(constraint[0])
        stdout.write(' - Dropping unique constraint and index on label '
                     '{0} with property {1}.\n'.format(match.group(1), match.group(2)))
    stdout.write("\n")
def relabeled(self, label, new_label):
    """Return a new table with ``label`` specifying column label(s)
    replaced by corresponding ``new_label``.

    Args:
        ``label`` -- (str or array of str) The label(s) of
            columns to be changed.
        ``new_label`` -- (str or array of str): The new label(s) of
            columns to be changed. Same number of elements as label.

    Raises:
        ``ValueError`` -- if ``label`` does not exist in
            table, or if the ``label`` and ``new_label`` are not of
            equal length. Also raised if ``label`` and/or ``new_label``
            are not ``str``.

    Returns:
        New table with ``new_label`` in place of ``label``.

    >>> tiles = Table().with_columns('letter', make_array('c', 'd'),
    ...                              'count', make_array(2, 4))
    >>> tiles.relabeled('count', 'number')
    letter | number
    c      | 2
    d      | 4
    >>> tiles  # original table unmodified
    letter | count
    c      | 2
    d      | 4
    """
    # Work on a copy so the original table is left untouched.
    renamed = self.copy()
    renamed.relabel(label, new_label)
    return renamed
def replace_vertex_references(self, mask):
    """Replace the vertex index references in every entity.

    Parameters
    ----------
    mask : (len(self.vertices),) int
        Contains new vertex indexes.

    Alters
    ------
    entity.points in self.entities
        Replaced by mask[entity.points].
    """
    for ent in self.entities:
        ent.points = mask[ent.points]
def generate_files(path='', ext='', level=None, dirs=False, files=True, verbosity=0):
    """Recursively generate files (and their stats) in the indicated directory.

    Filter by the indicated file name extension (ext).

    Args:
        path (str): Root/base path to search.
        ext (str): File name extension. Only paths that ".endswith()" this
            string will be returned.
        level (int, optional): Depth of file tree to halt recursion at.
            None = full recursion, 0 = nonrecursive (root level only),
            1 = one level of depth deeper in the tree, ...
        dirs (bool): Whether to yield dir paths along with file paths
            (default: False).
        files (bool): Whether to yield file paths (default: True).
            `dirs=True`, `files=False` is equivalent to `ls -d`.

    Yields:
        dict: per-path status record as produced by ``path_status`` (path,
            name, size, timestamps, permissions, type).
    """
    root = path or './'
    suffix = str(ext).lower()
    for dir_path, dir_names, filenames in walk_level(root, level=level):
        if verbosity > 0:
            print('Checking path "{}"'.format(dir_path))
        # Files first, then directories, matching 'ls' style output order.
        names = []
        if files:
            names.extend(filenames)
        if dirs:  # TODO: warn user if ext and dirs both set
            names.extend(dir_names)
        for fn in names:
            if suffix and not fn.lower().endswith(suffix):
                continue
            yield path_status(dir_path, fn, verbosity=verbosity)
def _callable_func(self, func, axis, *args, **kwargs):
    """Apply callable functions across given axis.

    Args:
        func: The functions to apply.
        axis: Target axis to apply the function along.

    Returns:
        A new PandasQueryCompiler.
    """
    def callable_apply_builder(df, axis=0):
        # Restore the real labels on the axis being applied over and give
        # the other axis a plain positional index before applying `func`.
        if not axis:
            df.index = index
            df.columns = pandas.RangeIndex(len(df.columns))
        else:
            df.columns = index
            df.index = pandas.RangeIndex(len(df.index))
        result = df.apply(func, axis=axis, *args, **kwargs)
        return result
    # `index` is closed over by callable_apply_builder above; Python's late
    # binding makes the assignment below visible to the closure.
    index = self.index if not axis else self.columns
    func_prepared = self._build_mapreduce_func(callable_apply_builder, axis=axis)
    result_data = self._map_across_full_axis(axis, func_prepared)
    return self._post_process_apply(result_data, axis)
def vagalume(song):
    """Returns the lyrics found in vagalume.com.br for the specified mp3
    file or an empty string if not found."""
    translate = {'@': 'a', URLESCAPE: '', ' ': '-'}

    def slugify(text):
        # Lowercase, apply the translation table, then collapse repeated
        # dashes left over from removed characters.
        slug = normalize(text.lower(), translate)
        return re.sub(r'\-{2,}', '-', slug)

    url = 'https://www.vagalume.com.br/{}/{}.html'.format(
        slugify(song.artist), slugify(song.title))
    soup = get_url(url)
    matches = soup.select('div#lyrics')
    if not matches:
        return ''
    lyrics_div = matches[0]
    # Turn <br> tags into newlines before extracting the text.
    for br in lyrics_div.find_all('br'):
        br.replace_with('\n')
    return lyrics_div.get_text().strip()
def packages(self):
    """Property for accessing :class:`PackageManager` instance, which is
    used to manage packages.

    :rtype: yagocd.resources.package.PackageManager
    """
    # Lazily construct the manager on first access and cache it.
    manager = self._package_manager
    if manager is None:
        manager = PackageManager(session=self._session)
        self._package_manager = manager
    return manager
def with_extrapolation(points, noise, n_points):
    """Smooths a set of points, but it extrapolates some points at the
    beginning.

    Args:
        points (:obj:`list` of :obj:`Point`)
        noise (float): Expected noise, the higher it is the more the path
            will be smoothed.
        n_points (int): Number of points to extrapolate at the beginning;
            the extrapolated points are dropped from the result.
    Returns:
        :obj:`list` of :obj:`Point`
    """
    # BUG FIX: the previous version overwrote n_points with a hard-coded 10,
    # silently ignoring the caller's argument.
    return kalman_filter(extrapolate_points(points, n_points) + points, noise)[n_points:]
def load_data(cr, module_name, filename, idref=None, mode='init'):
    """Load an xml, csv or yml data file from your post script. The usual
    case for this is the occurrence of newly added essential or useful data
    in the module that is marked with "noupdate='1'" and without
    "forcecreate='1'" so that it will not be loaded by the usual upgrade
    mechanism. Leaving the 'mode' argument to its default 'init' will load
    the data from your migration script.

    Theoretically, you could simply load a stock file from the module, but
    be careful not to reinitialize any data that could have been customized.
    Preferably, select only the newly added items. Copy these to a file
    in your migrations directory and load that file.
    Leave it to the user to actually delete existing resources that are
    marked with 'noupdate' (other named items will be deleted
    automatically).

    :param module_name: the name of the module
    :param filename: the path to the filename, relative to the module
        directory.
    :param idref: optional hash with ?id mapping cache?
    :param mode:
        one of 'init', 'update', 'demo', 'init_no_create'.
        Always use 'init' for adding new items from files that are marked
        with 'noupdate'. Defaults to 'init'.

        'init_no_create' is a hack to load data for records which have
        forcecreate=False set. As those records won't be recreated during
        the update, standard Odoo would recreate the record if it was
        deleted, but this will fail in cases where there are required
        fields to be filled which are not contained in the data file.
    """
    if idref is None:
        idref = {}
    logger.info('%s: loading %s' % (module_name, filename))
    _, ext = os.path.splitext(filename)
    pathname = os.path.join(module_name, filename)
    fp = tools.file_open(pathname)
    try:
        if ext == '.csv':
            # CSV data is always imported as noupdate records.
            noupdate = True
            tools.convert_csv_import(cr, module_name, pathname, fp.read(), idref, mode, noupdate)
        elif ext == '.yml':
            yaml_import(cr, module_name, fp, None, idref=idref, mode=mode)
        elif mode == 'init_no_create':
            # Re-import only the XML records that already exist, so
            # forcecreate=False records are not recreated (see docstring).
            for fp2 in _get_existing_records(cr, fp, module_name):
                tools.convert_xml_import(cr, module_name, fp2, idref, mode='init',)
        else:
            tools.convert_xml_import(cr, module_name, fp, idref, mode=mode)
    finally:
        fp.close()
def get_sticky(self, subreddit, bottom=False):
    """Return a Submission object for the sticky of the subreddit.

    :param bottom: Get the top or bottom sticky. If the subreddit has only
        a single sticky, it is considered the top one.
    """
    url = self.config['sticky'].format(subreddit=six.text_type(subreddit))
    # 'num': 2 selects the second (bottom) sticky when requested.
    params = {'num': 2} if bottom else None
    response = self.request_json(url, params=params)
    return objects.Submission.from_json(response)
def remove_parenthesis_around_tz(cls, timestr):
    """get rid of parenthesis around timezone: (GMT) => GMT

    :return: the new string if parenthesis were found, `None` otherwise
    """
    match = cls.TIMEZONE_PARENTHESIS.match(timestr)
    return match.group(1) if match is not None else None
def _dspace(irez, d2201, d2211, d3210, d3222, d4410, d4422, d5220, d5232, d5421, d5433, dedt, del1, del2, del3, didt, dmdt, dnodt, domdt, argpo, argpdot, t, tc, gsto, xfact, xlamo, no, atime, em, argpm, inclm, xli, mm, xni, nodem, nm,):
    """Apply deep-space secular rates and resonance effects to the orbital
    elements, integrating the resonance terms numerically when irez != 0.

    NOTE(review): ported SGP4 "dspace" routine; variable names mirror the
    reference implementation -- confirm against the Vallado SGP4 sources.
    """
    # Resonance phase constants from the reference implementation.
    fasx2 = 0.13130908;
    fasx4 = 2.8843198;
    fasx6 = 0.37448087;
    g22 = 5.7686396;
    g32 = 0.95240898;
    g44 = 1.8014998;
    g52 = 1.0508330;
    g54 = 4.4108898;
    rptim = 4.37526908801129966e-3;
    # equates to 7.29211514668855e-5 rad/sec
    stepp = 720.0;
    stepn = -720.0;
    step2 = 259200.0;
    # ----- calculate deep space resonance effects -----
    dndt = 0.0;
    theta = (gsto + tc * rptim) % twopi
    # Accumulate the secular rates over the elapsed time t.
    em = em + dedt * t;
    inclm = inclm + didt * t;
    argpm = argpm + domdt * t;
    nodem = nodem + dnodt * t;
    mm = mm + dmdt * t;
    """// sgp4fix for negative inclinations
    // the following if statement should be commented out
    // if (inclm < 0.0)
    //   inclm = -inclm;
    //   argpm = argpm - pi;
    //   nodem = nodem + pi;
    /* - update resonances : numerical (euler-maclaurin) integration - */
    /* - epoch restart - */
    // sgp4fix for propagator problems
    // the following integration works for negative time steps and periods
    // the specific changes are unknown because the original code was so convoluted
    // sgp4fix take out atime = 0.0 and fix for faster operation"""
    ft = 0.0;
    if irez != 0:  # sgp4fix streamline check
        # Restart from epoch when the sign of t flipped or when t moved
        # back inside the previously integrated interval.
        if atime == 0.0 or t * atime <= 0.0 or fabs(t) < fabs(atime):
            atime = 0.0;
            xni = no;
            xli = xlamo;
        # sgp4fix move check outside loop
        if t > 0.0:
            delt = stepp;
        else:
            delt = stepn;
        iretn = 381;
        # added for do loop
        iret = 0;
        # added for loop
        while iretn == 381:
            # ----- dot terms calculated -----
            # ----- near-synchronous resonance terms -----
            if irez != 2:
                xndt = del1 * sin(xli - fasx2) + del2 * sin(2.0 * (xli - fasx4)) + del3 * sin(3.0 * (xli - fasx6));
                xldot = xni + xfact;
                xnddt = del1 * cos(xli - fasx2) + 2.0 * del2 * cos(2.0 * (xli - fasx4)) + 3.0 * del3 * cos(3.0 * (xli - fasx6));
                xnddt = xnddt * xldot;
            else:
                # ----- near-half-day resonance terms -----
                xomi = argpo + argpdot * atime;
                x2omi = xomi + xomi;
                x2li = xli + xli;
                xndt = (d2201 * sin(x2omi + xli - g22) + d2211 * sin(xli - g22) + d3210 * sin(xomi + xli - g32) + d3222 * sin(-xomi + xli - g32) + d4410 * sin(x2omi + x2li - g44) + d4422 * sin(x2li - g44) + d5220 * sin(xomi + xli - g52) + d5232 * sin(-xomi + xli - g52) + d5421 * sin(xomi + x2li - g54) + d5433 * sin(-xomi + x2li - g54));
                xldot = xni + xfact;
                xnddt = (d2201 * cos(x2omi + xli - g22) + d2211 * cos(xli - g22) + d3210 * cos(xomi + xli - g32) + d3222 * cos(-xomi + xli - g32) + d5220 * cos(xomi + xli - g52) + d5232 * cos(-xomi + xli - g52) + 2.0 * (d4410 * cos(x2omi + x2li - g44) + d4422 * cos(x2li - g44) + d5421 * cos(xomi + x2li - g54) + d5433 * cos(-xomi + x2li - g54)));
                xnddt = xnddt * xldot;
            # ----- integrator -----
            # sgp4fix move end checks to end of routine
            if fabs(t - atime) >= stepp:
                iret = 0;
                iretn = 381;
            else:
                ft = t - atime;
                iretn = 0;
            if iretn == 381:
                # Full Euler-Maclaurin step of size delt.
                xli = xli + xldot * delt + xndt * step2;
                xni = xni + xndt * delt + xnddt * step2;
                atime = atime + delt;
        # Final partial step of length ft to land exactly on time t.
        nm = xni + xndt * ft + xnddt * ft * ft * 0.5;
        xl = xli + xldot * ft + xndt * ft * ft * 0.5;
        if irez != 1:
            mm = xl - 2.0 * nodem + 2.0 * theta;
            dndt = nm - no;
        else:
            mm = xl - nodem - argpm + theta;
            dndt = nm - no;
        nm = no + dndt;
    return (atime, em, argpm, inclm, xli, mm, xni, nodem, dndt, nm,)
def zincrby(self, name, value, amount=1):
    """Increment the score of the item by `value`

    :param name: str the name of the redis key
    :param value:
    :param amount:
    :return:
    """
    with self.pipe as pipe:
        key = self.redis_key(name)
        encoded = self.valueparse.encode(value)
        return pipe.zincrby(key, value=encoded, amount=amount)
def typed_encode(value, sub_schema, path, net_new_properties, buffer):
    """Encode `value` as typed JSON into `buffer`, growing the schema as needed.

    :param value: THE DATA STRUCTURE TO ENCODE
    :param sub_schema: dict FROM PATH TO Column DESCRIBING THE TYPE
    :param path: list OF CURRENT PATH
    :param net_new_properties: list FOR ADDING NEW PROPERTIES NOT FOUND IN sub_schema
    :param buffer: UnicodeBuilder OBJECT
    :return:
    """
    try:
        # from jx_base import Column
        if sub_schema.__class__.__name__ == 'Column':
            # Schema node is a concrete Column: check the value's JSON type
            # against the column's ES type before encoding.
            value_json_type = python_type_to_json_type[value.__class__]
            column_json_type = es_type_to_json_type[sub_schema.es_type]
            if value_json_type == column_json_type:
                # ok
                pass
            elif value_json_type == NESTED and all(python_type_to_json_type[v.__class__] == column_json_type for v in value if v != None):
                # empty arrays can be anything
                pass
            else:
                from mo_logs import Log
                Log.error("Can not store {{value}} in {{column|quote}}", value=value, column=sub_schema.name)
            # Re-wrap the Column under its typed-property name so the code
            # below can treat sub_schema as a plain dict.
            sub_schema = {json_type_to_inserter_type[value_json_type]: sub_schema}
        # NOTE: `== None` (not `is None`) — presumably also matches Null-like
        # objects from mo_dots; confirm against the library's Null semantics.
        if value == None:
            from mo_logs import Log
            Log.error("can not encode null (missing) values")
        elif value is True:
            if BOOLEAN_TYPE not in sub_schema:
                sub_schema[BOOLEAN_TYPE] = {}
                net_new_properties.append(path + [BOOLEAN_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_BOOLEAN_TYPE)
            append(buffer, 'true}')
            return
        elif value is False:
            if BOOLEAN_TYPE not in sub_schema:
                sub_schema[BOOLEAN_TYPE] = {}
                net_new_properties.append(path + [BOOLEAN_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_BOOLEAN_TYPE)
            append(buffer, 'false}')
            return
        _type = value.__class__
        if _type in (dict, Data):
            if sub_schema.__class__.__name__ == 'Column':
                from mo_logs import Log
                Log.error("Can not handle {{column|json}}", column=sub_schema)
            if NESTED_TYPE in sub_schema:
                # PREFER NESTED, WHEN SEEN BEFORE
                if value:
                    append(buffer, '{')
                    append(buffer, QUOTED_NESTED_TYPE)
                    append(buffer, '[')
                    _dict2json(value, sub_schema[NESTED_TYPE], path + [NESTED_TYPE], net_new_properties, buffer)
                    append(buffer, ']' + COMMA)
                    append(buffer, QUOTED_EXISTS_TYPE)
                    append(buffer, text_type(len(value)))
                    append(buffer, '}')
                else:
                    # SINGLETON LIST: empty dict still recorded as one nested row
                    append(buffer, '{')
                    append(buffer, QUOTED_NESTED_TYPE)
                    append(buffer, '[{')
                    append(buffer, QUOTED_EXISTS_TYPE)
                    append(buffer, '1}]')
                    append(buffer, COMMA)
                    append(buffer, QUOTED_EXISTS_TYPE)
                    append(buffer, '1}')
            else:
                if EXISTS_TYPE not in sub_schema:
                    sub_schema[EXISTS_TYPE] = {}
                    net_new_properties.append(path + [EXISTS_TYPE])
                if value:
                    _dict2json(value, sub_schema, path, net_new_properties, buffer)
                else:
                    # empty dict: record only the exists marker
                    append(buffer, '{')
                    append(buffer, QUOTED_EXISTS_TYPE)
                    append(buffer, '1}')
        elif _type is binary_type:
            if STRING_TYPE not in sub_schema:
                sub_schema[STRING_TYPE] = True
                net_new_properties.append(path + [STRING_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_STRING_TYPE)
            append(buffer, '"')
            try:
                v = utf82unicode(value)
            except Exception as e:
                raise problem_serializing(value, e)
            for c in v:
                # escape each character per the JSON escape table
                append(buffer, ESCAPE_DCT.get(c, c))
            append(buffer, '"}')
        elif _type is text_type:
            if STRING_TYPE not in sub_schema:
                sub_schema[STRING_TYPE] = True
                net_new_properties.append(path + [STRING_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_STRING_TYPE)
            append(buffer, '"')
            for c in value:
                append(buffer, ESCAPE_DCT.get(c, c))
            append(buffer, '"}')
        elif _type in integer_types:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, text_type(value))
            append(buffer, '}')
        elif _type in (float, Decimal):
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, float2json(value))
            append(buffer, '}')
        elif _type in (set, list, tuple, FlatList):
            if len(value) == 0:
                append(buffer, '{')
                append(buffer, QUOTED_EXISTS_TYPE)
                append(buffer, '0}')
            elif any(v.__class__ in (Data, dict, set, list, tuple, FlatList) for v in value):
                # THIS IS NOT DONE BECAUSE (original comment truncated here)
                if len(value) == 1:
                    if NESTED_TYPE in sub_schema:
                        append(buffer, '{')
                        append(buffer, QUOTED_NESTED_TYPE)
                        _list2json(value, sub_schema[NESTED_TYPE], path + [NESTED_TYPE], net_new_properties, buffer)
                        append(buffer, '}')
                    else:
                        # NO NEED TO NEST, SO DO NOT DO IT
                        typed_encode(value[0], sub_schema, path, net_new_properties, buffer)
                else:
                    if NESTED_TYPE not in sub_schema:
                        sub_schema[NESTED_TYPE] = {}
                        net_new_properties.append(path + [NESTED_TYPE])
                    append(buffer, '{')
                    append(buffer, QUOTED_NESTED_TYPE)
                    _list2json(value, sub_schema[NESTED_TYPE], path + [NESTED_TYPE], net_new_properties, buffer)
                    append(buffer, '}')
            else:
                # ALLOW PRIMITIVE MULTIVALUES
                value = [v for v in value if v != None]
                types = list(set(json_type_to_inserter_type[python_type_to_json_type[v.__class__]] for v in value))
                if len(types) == 0:
                    # HANDLE LISTS WITH Nones IN THEM
                    append(buffer, '{')
                    append(buffer, QUOTED_NESTED_TYPE)
                    append(buffer, '[]}')
                elif len(types) > 1:
                    # mixed primitive types: fall back to a nested list
                    _list2json(value, sub_schema, path + [NESTED_TYPE], net_new_properties, buffer)
                else:
                    element_type = types[0]
                    if element_type not in sub_schema:
                        sub_schema[element_type] = True
                        net_new_properties.append(path + [element_type])
                    append(buffer, '{')
                    append(buffer, quote(element_type))
                    append(buffer, COLON)
                    _multivalue2json(value, sub_schema[element_type], path + [element_type], net_new_properties, buffer)
                    append(buffer, '}')
        elif _type is date:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            # dates stored as unix seconds (local time via mktime)
            append(buffer, float2json(time.mktime(value.timetuple())))
            append(buffer, '}')
        elif _type is datetime:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, float2json(time.mktime(value.timetuple())))
            append(buffer, '}')
        elif _type is Date:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, float2json(value.unix))
            append(buffer, '}')
        elif _type is timedelta:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, float2json(value.total_seconds()))
            append(buffer, '}')
        elif _type is Duration:
            if NUMBER_TYPE not in sub_schema:
                sub_schema[NUMBER_TYPE] = True
                net_new_properties.append(path + [NUMBER_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NUMBER_TYPE)
            append(buffer, float2json(value.seconds))
            append(buffer, '}')
        elif _type is NullType:
            append(buffer, 'null')
        elif hasattr(value, '__data__'):
            # objects that can expose themselves as data: recurse on that form
            typed_encode(value.__data__(), sub_schema, path, net_new_properties, buffer)
        elif hasattr(value, '__iter__'):
            if NESTED_TYPE not in sub_schema:
                sub_schema[NESTED_TYPE] = {}
                net_new_properties.append(path + [NESTED_TYPE])
            append(buffer, '{')
            append(buffer, QUOTED_NESTED_TYPE)
            _iter2json(value, sub_schema[NESTED_TYPE], path + [NESTED_TYPE], net_new_properties, buffer)
            append(buffer, '}')
        else:
            from mo_logs import Log
            Log.error(text_type(repr(value)) + " is not JSON serializable")
    except Exception as e:
        from mo_logs import Log
        Log.error(text_type(repr(value)) + " is not JSON serializable", cause=e)
def space_events(lon=None, lat=None, limit=None, date=None):
    '''Query the PredictTheSky API for space events visible at a location.

    lat & lon expect decimal latitude and longitude values. (Required)
    limit assumes an integer. Default is 5. (Optional)
    date expects an ISO 8601 formatted date. (Optional)

    :raises ValueError: if lon/lat are missing or not float-like, or if
        `date` is not valid ISO 8601.
    :return: result of dispatch_http_get on the built URL
    '''
    base_url = 'http://api.predictthesky.org/?'
    # `is None`, not truthiness: 0.0 is a perfectly valid latitude/longitude
    # and must not be rejected.
    if lon is None or lat is None:
        raise ValueError("space_events endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
    try:
        validate_float(lon, lat)
        # Floats are entered/displayed as decimal numbers, but stored in
        # binary. Going through str() keeps the short decimal form;
        # Decimal(0.1) directly would expand to the full 55-digit binary
        # expansion and pollute the query string.
        lon = decimal.Decimal(str(lon))
        lat = decimal.Decimal(str(lat))
        base_url += "lon=" + str(lon) + "&" + "lat=" + str(lat)
    except Exception:
        raise ValueError("space_events endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5")
    if date:
        try:
            validate_iso8601(date)
            base_url += "&" + 'date=' + date
        except Exception:
            raise ValueError("Your date input is not in iso8601 format. ex: 2014-01-01T23:59:59")
    if limit:
        if not isinstance(limit, int):
            # Honour the logged message: a non-int limit really is ignored.
            logger.error("The limit arg you provided is not the type of int, ignoring it")
        else:
            base_url += "&" + "limit=" + str(limit)
    return dispatch_http_get(base_url)
def work():
    """Implement a worker for write-math.com.

    Fetches up to `chunk_size` unclassified recordings from the server,
    classifies each (via segmenter beam search or the segmented classifier,
    depending on module-level `use_segmenter_flag`), and POSTs the results
    back. Returns a human-readable status string in every exit path.
    """
    # NOTE(review): depends on module-level state (`n`, `use_segmenter_flag`)
    # and helpers (utils, classify, se, requests, urlopen, ...) — confirm.
    global n
    cmd = utils.get_project_configuration()
    if 'worker_api_key' not in cmd:
        # Without an API key, results cannot be submitted; bail out early.
        return ("You need to define a 'worker_api_key' in your ~/")
    chunk_size = 1000
    logging.info("Start working with n=%i", n)
    for _ in range(chunk_size):
        # contact the write-math server and get something to classify
        url = "http://www.martin-thoma.de/write-math/api/get_unclassified.php"
        response = urlopen(url)
        page_source = response.read()
        parsed_json = json.loads(page_source)
        if parsed_json is False:
            return "Nothing left to classify"
        raw_data_json = parsed_json['recording']
        # Check recording is valid JSON before classifying
        try:
            json.loads(raw_data_json)
        except ValueError:
            return ("Raw Data ID %s; Invalid JSON string: %s" % (parsed_json['id'], raw_data_json))
        # Classify
        if use_segmenter_flag:
            # Beam-search segmentation: feed strokes one at a time.
            strokelist = json.loads(raw_data_json)
            beam = se.Beam()
            for stroke in strokelist:
                beam.add_stroke(stroke)
            results = beam.get_writemath_results()
        else:
            # Pre-segmented path: one segmentation covering all strokes.
            results_sym = classify.classify_segmented_recording(raw_data_json)
            results = []
            strokelist = json.loads(raw_data_json)
            segmentation = [list(range(len(strokelist)))]
            translate = _get_translate()
            for symbol in results_sym:
                s = {'id': get_writemath_id(symbol, translate), 'probability': symbol['probability']}
                results.append({'probability': symbol['probability'], 'segmentation': segmentation, 'symbols': [s]})
        print("\thttp://write-math.com/view/?raw_data_id=%s" % str(parsed_json['id']))
        # Submit classification to write-math.com server
        results_json = get_json_result(results, n=n)
        headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': 'application/x-www-form-urlencoded'}
        payload = {'recording_id': parsed_json['id'], 'results': results_json, 'api_key': cmd['worker_api_key']}
        s = requests.Session()
        # NOTE(review): this POSTs back to the get_unclassified.php URL —
        # looks suspicious; confirm the endpoint also accepts submissions.
        req = requests.Request('POST', url, headers=headers, data=payload)
        prepared = req.prepare()
        response = s.send(prepared)
        try:
            response = json.loads(response.text)
        except ValueError:
            return "Invalid JSON response: %s" % response.text
        if 'error' in response:
            logging.info(response)
            return str(response)
    return "Done - Classified %i recordings" % chunk_size
def v_unique_name_defintions(ctx, stmt):
    """Make sure that all top-level definitions in a module are unique"""
    checks = (
        ('typedef', 'TYPE_ALREADY_DEFINED', stmt.i_typedefs),
        ('grouping', 'GROUPING_ALREADY_DEFINED', stmt.i_groupings),
    )

    def report_duplicate(sub):
        # A submodule definition that clashes with one already registered
        # at the module level is reported at the module-level position.
        for keyword, errcode, defined in checks:
            if sub.keyword == keyword and sub.arg in defined:
                err_add(ctx.errors, defined[sub.arg].pos, errcode, (sub.arg, sub.pos))

    for inc in stmt.search('include'):
        submodule = ctx.get_module(inc.arg)
        if submodule is None:
            continue
        for child in submodule.substmts:
            for grandchild in child.substmts:
                iterate_stmt(grandchild, report_duplicate)
def clear_caches(self):
    """Empty the single-node repertoire, repertoire, and MICE caches."""
    caches = (
        self._single_node_repertoire_cache,
        self._repertoire_cache,
        self._mice_cache,
    )
    for cache in caches:
        cache.clear()
def inc_convert(self, value):
    """Default converter for the inc:// protocol.

    Resolves `value` relative to ``self.base`` when it is not an absolute
    path, then loads and returns the JSON document it points to.
    """
    path = value if os.path.isabs(value) else os.path.join(self.base, value)
    with codecs.open(path, 'r', encoding='utf-8') as stream:
        return json.load(stream)
def class_balance(y_train, y_test=None, ax=None, labels=None, **kwargs):
    """Quick method: draw a class-balance bar chart.

    Class imbalance is one of the biggest challenges for classification
    models; this chart shows how frequently each class occurs. With only
    ``y_train`` the figure is in balance mode (per-class frequency); when
    ``y_test`` is also given it switches to compare mode (train vs. test
    support per class).

    Parameters
    ----------
    y_train : array-like of shape (n,)
        Discrete target values.
    y_test : array-like of shape (m,), optional
        Discrete target values; when given the chart is drawn in compare
        mode.
    ax : matplotlib Axes, default: None
        The axes to draw on; the current axes are used (or created) when
        None.
    labels : list, optional
        Class names for the x-axis when the target is already encoded.
        Must be ordered lexicographically with respect to the target
        values; ``LabelEncoder.classes_`` is a common choice. Defaults to
        the labels found in the data.
    kwargs : dict, optional
        Forwarded to the visualizer superclass (e.g. to colorize bars).

    Returns
    -------
    ax : matplotlib axes
        The axes the class balance plot was drawn on.
    """
    viz = ClassBalance(ax=ax, labels=labels, **kwargs)
    viz.fit(y_train, y_test)  # fit() performs the draw
    viz.finalize()
    return viz.ax
def render(genshi_data, saltenv='base', sls='', method='xml', **kws):
    '''Render a Genshi template.

    The rendering method should be passed in via kwargs; ``xml`` is assumed
    when none is given. Valid methods are:

    .. code-block:

        - xml
        - xhtml
        - html
        - text
        - newtext
        - oldtext

    Note that the ``text`` method will call ``NewTextTemplate``. If
    ``oldtext`` is desired, it must be called explicitly.

    :rtype: A Python data structure
    '''
    if not HAS_LIBS:
        return {}
    if not isinstance(genshi_data, six.string_types):
        genshi_data = genshi_data.read()
    # Drop a leading shebang line before rendering.
    if genshi_data.startswith('#!'):
        genshi_data = genshi_data[genshi_data.find('\n') + 1:]
    if not genshi_data.strip():
        return {}
    template_by_method = {
        'text': NewTextTemplate,
        'newtext': NewTextTemplate,
        'oldtext': OldTextTemplate,
    }
    template_cls = template_by_method.get(method, MarkupTemplate)
    return template_cls(genshi_data).generate(**kws).render(method)
def _calc_taub ( w , aod700 , p ) :
"""Calculate the taub coefficient""" | p0 = 101325.
tb1 = 1.82 + 0.056 * np . log ( w ) + 0.0071 * np . log ( w ) ** 2
tb0 = 0.33 + 0.045 * np . log ( w ) + 0.0096 * np . log ( w ) ** 2
tbp = 0.0089 * w + 0.13
taub = tb1 * aod700 + tb0 + tbp * np . log ( p / p0 )
return taub |
def checkArgs(args):
    """Checks the arguments and options.

    :param args: an object containing the options of the program.
    :type args: argparse.Namespace

    :returns: ``True`` if everything was OK.

    If there is a problem with an option, an exception is raised using the
    :py:class:`ProgramError` class, a message is printed to
    :class:`sys.stderr` and the program exits with code 1.

    .. note::
        Only one operation for markers and one operation for samples can be
        done at a time. Hence, one of ``--exclude`` or ``--extract`` can be
        done for markers, and one of ``--remove`` or ``--keep`` can be done
        for samples.
    """
    _checkInputFileType(args)
    # At least one marker or sample operation must be requested.
    if (args.exclude is None and args.extract is None
            and args.remove is None and args.keep is None):
        raise ProgramError("needs at least one of --exclude, --extract, --remove or --keep")
    # Markers: --exclude / --extract are mutually exclusive.
    _checkExclusivePair(args.exclude, args.extract, "--extract or --exclude")
    # Samples: --remove / --keep are mutually exclusive.
    _checkExclusivePair(args.remove, args.keep, "--keep or --remove")
    return True


def _checkInputFileType(args):
    """Ensure exactly one input type is selected and its files exist."""
    extensions = {
        'is_bfile': [".bed", ".bim", ".fam"],
        'is_tfile': [".tped", ".tfam"],
        'is_file': [".ped", ".map"],
    }
    chosen = [flag for flag in ('is_bfile', 'is_tfile', 'is_file') if getattr(args, flag)]
    if not chosen:
        raise ProgramError("needs one input file type (--is-bfile, --is-tfile or --is-file)")
    if len(chosen) > 1:
        raise ProgramError("needs only one input file type (--is-bfile, --is-tfile or --is-file)")
    for fileName in [args.ifile + ext for ext in extensions[chosen[0]]]:
        if not os.path.isfile(fileName):
            raise ProgramError("{}: no such file".format(fileName))


def _checkExclusivePair(first, second, description):
    """Check a mutually exclusive pair of file options; whichever one is
    selected must point to an existing file."""
    if first is not None and second is not None:
        raise ProgramError("use only one of {}".format(description))
    selected = first if first is not None else second
    if selected is not None and not os.path.isfile(selected):
        raise ProgramError("{}: no such file".format(selected))
def filter_by_moys(self, moys):
    """Filter the Data Collection based on a list of minutes of the year.

    Args:
        moys: A List of minutes of the year [0..8759 * 60]

    Return:
        A new Data Collection with filtered data
    """
    # Minutes covered by a single timestep of the analysis period.
    t_s = 60 / self.header.analysis_period.timestep
    # Index (in timesteps) of the period's start time within the year.
    st_ind = self.header.analysis_period.st_time.moy / t_s
    if self.header.analysis_period.is_reversed is False:
        # Simple case: indices are offsets from the period start.
        _filt_indices = [int(moy / t_s - st_ind) for moy in moys]
    else:
        # Reversed period (wraps over the new year): minutes before the
        # start belong to the tail and are offset by the end-of-year index.
        if self.header.analysis_period.is_leap_year is False:
            eoy_ind = 8759 * self.header.analysis_period.timestep - st_ind
        else:
            eoy_ind = 8783 * self.header.analysis_period.timestep - st_ind
        _filt_indices = []
        for moy in moys:
            ind = moy / t_s
            if ind > st_ind:
                _filt_indices.append(int(ind - st_ind))
            else:
                _filt_indices.append(int(ind + eoy_ind))
    _filt_values = [self._values[i] for i in _filt_indices]
    _filt_datetimes = [self.datetimes[i] for i in _filt_indices]
    _filt_header = self.header.duplicate()
    coll = HourlyDiscontinuousCollection(_filt_header, _filt_values, _filt_datetimes)
    # Mark the new collection's analysis period as already validated.
    coll._validated_a_period = True
    return coll
def search_data_std(Channel, RunNos, RepeatNos, directoryPath='.'):
    """Find multiple datasets at once, assuming filenames contain a
    pattern of the form:

        CH<ChannelNo>_RUN00...<RunNo>_REPEAT00...<RepeatNo>

    Parameters
    ----------
    Channel : int
        The channel you want to load
    RunNos : sequence
        Sequence of run numbers you want to load
    RepeatNos : sequence
        Sequence of repeat numbers you want to load
    directoryPath : string, optional
        The path to the directory housing the data.
        The default is the current directory.

    Returns
    -------
    Data_filepaths : list
        A list containing the filepaths to the matching files
    """
    # Narrow to the requested channel first.
    channel_files = [
        f for f in glob('{}/*'.format(directoryPath))
        if 'CH{}'.format(Channel) in f
    ]
    # Then keep only the requested run numbers...
    run_files = []
    for run_no in RunNos:
        run_files.extend(_fnmatch.filter(channel_files, '*RUN*0{}_*'.format(run_no)))
    # ...and finally the requested repeat numbers.
    repeat_files = []
    for repeat_no in RepeatNos:
        repeat_files.extend(_fnmatch.filter(run_files, '*REPEAT*0{}.*'.format(repeat_no)))
    return repeat_files
def rsky_lhood(self, rsky, **kwargs):
    """Evaluate the projected-separation (rsky) likelihood at the given
    position(s).

    :param rsky: position(s) at which to evaluate
    :param **kwargs:
        Keyword arguments passed to :func:`BinaryPopulation.rsky_distribution`
    """
    return self.rsky_distribution(**kwargs)(rsky)
def block(self, tofile="block.dat"):
    '''Fetch the securities sector/block information.

    :param tofile: file name the raw block info is written to
    :return: pd.DataFrame or None
    '''
    with self.client.connect(*self.bestip):
        raw = self.client.get_and_parse_block_info(tofile)
        return self.client.to_df(raw)
def get_value(self, expression):
    """Return the value that `expression` evaluates to.

    Raises if this result is no longer valid.
    """
    self._check_valid()
    parent = super(Result, self)
    return parent.get_value(expression)
def _get_restore_function ( self ) :
"""Return the binary function for restoring terminal attributes .
: return : function ( signal , frame ) = > None :""" | if os . name == 'nt' or not self . getch_enabled :
return lambda signal , frame : None
try :
fd = self . stdin . fileno ( )
initial = termios . tcgetattr ( fd )
except termios . error :
return lambda signal , frame : None
return lambda signal , frame : termios . tcsetattr ( fd , termios . TCSADRAIN , initial ) |
def check_password(cls, instance, raw_password, enable_hash_migration=True):
    """Verify `raw_password` against the user's stored hash via the
    instance's password manager.

    :param instance: object holding ``passwordmanager`` and ``user_password``
    :param raw_password: plaintext candidate password
    :param enable_hash_migration: if legacy hashes should be migrated
    :return: whether the password matched
    """
    verified, new_hash = instance.passwordmanager.verify_and_update(raw_password, instance.user_password)
    if new_hash and enable_hash_migration:
        # On Python 2 the manager returns bytes; store text either way.
        instance.user_password = new_hash.decode("utf8") if six.PY2 else new_hash
    return verified
def match(self, package):
    """Match `package` with the requirement.

    :param package: Package to test with the requirement.
    :type package: package expression string or :class:`Package`
    :returns: ``True`` if `package` satisfies the requirement.
    :rtype: bool
    """
    if isinstance(package, basestring):
        from .packages import Package
        package = Package.parse(package)
    if self.name != package.name:
        return False
    if self.version_constraints and package.version not in self.version_constraints:
        return False
    if not self.build_options:
        # No build options requested: name/version agreement is enough.
        return True
    if not package.build_options:
        return False
    # Every requested build option must be present on the package.
    return not (self.build_options - package.build_options)
def l2traceroute_input_protocolType_IP_l4protocol(self, **kwargs):
    """Auto Generated Code"""
    # Build <l2traceroute><input><protocolType><IP><l4protocol> and hand
    # the tree to the callback (defaults to self._callback).
    config = ET.Element("l2traceroute")
    input_el = ET.SubElement(config, "input")
    protocol_type = ET.SubElement(input_el, "protocolType")
    ip_el = ET.SubElement(protocol_type, "IP")
    l4 = ET.SubElement(ip_el, "l4protocol")
    l4.text = kwargs.pop('l4protocol')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def start(config, bugnumber=""):
    """Create a new topic branch.

    Optionally looks up a bug's summary from its number, prompts the user
    for (or to confirm) a summary, derives a git-safe branch name from it,
    creates and checks out the branch, and records it in the config file.
    """
    repo = config.repo
    if bugnumber:
        summary, bugnumber, url = get_summary(config, bugnumber)
    else:
        url = None
        summary = None
    if summary:
        # Offer the fetched summary as the default; allow an override.
        summary = input('Summary ["{}"]: '.format(summary)).strip() or summary
    else:
        summary = input("Summary: ").strip()
    branch_name = ""
    if bugnumber:
        # NOTE(review): both branches produce the same "<bugnumber>-" prefix;
        # the is_github() distinction looks vestigial — confirm before
        # simplifying.
        if is_github({"bugnumber": bugnumber, "url": url}):
            branch_name = "{}-".format(bugnumber)
        else:
            branch_name = "{}-".format(bugnumber)

    def clean_branch_name(string):
        # Normalise whitespace and arrows, strip punctuation, and collapse
        # dashes so the summary becomes a safe git branch component.
        string = re.sub(r"\s+", " ", string)
        string = string.replace(" ", "-")
        string = string.replace("->", "-").replace("=>", "-")
        for each in "@%^&:'\"/(),[]{}!.?`$<>#*;=":
            string = string.replace(each, "")
        string = re.sub("-+", "-", string)
        string = string.strip("-")
        return string.lower().strip()

    branch_name += clean_branch_name(summary)
    if not branch_name:
        error_out("Must provide a branch name")
    # Check that the branch doesn't already exist
    found = list(find(repo, branch_name, exact=True))
    if found:
        error_out("There is already a branch called {!r}".format(found[0].name))
    new_branch = repo.create_head(branch_name)
    new_branch.checkout()
    if config.verbose:
        click.echo("Checkout out new branch: {}".format(branch_name))
    save(config.configfile, summary, branch_name, bugnumber=bugnumber, url=url)
def parse_resources(self, resources):
    """Parse raw resources with a factory and group them in
    ``self.resources`` by resource type."""
    self.resources = {}
    factory = ResourceFactory()
    for resource_id, resource_value in resources.items():
        resource = factory.create_resource(resource_id, resource_value)
        if not resource:
            continue
        self.resources.setdefault(resource.resource_type, []).append(resource)
def _init_from_dict(self, model_dict):
    """Initiate self from a model_dict to make sure attributes such as vars,
    params are available.

    Creates lists of alphabetically sorted independent vars, dependent vars,
    sigma vars, and parameters. Finally it creates a signature for this model
    so it can be called nicely. This signature only contains independent vars
    and params, as one would expect.

    :param model_dict: dict of (dependent_var, expression) pairs.
    """
    sort_func = lambda symbol: symbol.name
    # Keep the model dict ordered by dependent-variable name for stable
    # iteration order.
    self.model_dict = OrderedDict(sorted(model_dict.items(), key=lambda i: sort_func(i[0])))
    # Everything at the bottom of the toposort is independent, at the top
    # dependent, and the rest interdependent.
    ordered = list(toposort(self.connectivity_mapping))
    independent = sorted(ordered.pop(0), key=sort_func)
    self.dependent_vars = sorted(ordered.pop(-1), key=sort_func)
    self.interdependent_vars = sorted([item for items in ordered for item in items], key=sort_func)
    # `independent` contains both params and vars, needs to be separated
    self.independent_vars = [s for s in independent if not isinstance(s, Parameter) and not s in self]
    self.params = [s for s in independent if isinstance(s, Parameter)]
    try:
        # Sanity check: Parameters must never appear as (inter)dependent vars.
        assert not any(isinstance(var, Parameter) for var in self.dependent_vars)
        assert not any(isinstance(var, Parameter) for var in self.interdependent_vars)
    except AssertionError:
        raise ModelError('`Parameter`\'s can not feature in the role ' 'of `Variable`')
    # Make Variable object corresponding to each depedent var.
    self.sigmas = {var: Variable(name='sigma_{}'.format(var.name)) for var in self.dependent_vars}
def hash_of_signed_transaction(txn_obj):
    '''Regenerate the hash of the signed transaction object.

    1. Infer the chain ID from the signature
    2. Strip out signature from transaction
    3. Annotate the transaction with that ID, if available
    4. Take the hash of the serialized, unsigned, chain-aware transaction

    Chain ID inference and annotation is according to EIP-155.
    See details at https://github.com/ethereum/EIPs/blob/master/EIPS/eip-155.md

    :return: the hash of the provided transaction, to be signed
    '''
    chain_id, _v = extract_chain_id(txn_obj.v)
    unsigned_parts = strip_signature(txn_obj)
    if chain_id is None:
        unsigned_txn = UnsignedTransaction(*unsigned_parts)
    else:
        # EIP-155: append (chain_id, 0, 0) before hashing.
        unsigned_txn = ChainAwareUnsignedTransaction(*(unsigned_parts + [chain_id, 0, 0]))
    return unsigned_txn.hash()
def get_yield_stress(self, n):
    """Gets the yield stress for a given direction.

    Args:
        n (3x1 array-like): direction for which to find the yield stress

    Returns:
        tuple of (compressive, tensile) root solutions
    """
    # TODO: root finding could be more robust
    compressive = root(self.get_stability_criteria, -1, args=n)
    tensile = root(self.get_stability_criteria, 1, args=n)
    return (compressive.x, tensile.x)
def _recv_guess ( self , value ) :
"""Take the binary spew and try to make it into a float or integer . If
that can ' t be done , return a string .
Note : this is generally a bad idea , as values can be seriously mangled
by going from float - > string - > float . You ' ll generally be better off
using a format specifier and binary argument passing .""" | if self . give_warnings :
w = "Warning: Guessing input format for {}. This can give wildly incorrect values. Consider specifying a format and sending binary data." . format ( value )
warnings . warn ( w , Warning )
tmp_value = value . decode ( )
try :
float ( tmp_value )
if len ( tmp_value . split ( "." ) ) == 1 : # integer
return int ( tmp_value )
else : # float
return float ( tmp_value )
except ValueError :
pass
# Return as string
return self . _recv_string ( value ) |
def substitute_namespace_into_graph(self, graph):
    """Creates a graph from the local namespace of the code (to be used
    after the execution of the code).

    For every (name, attributes) entry in ``self.namespace``, the vertices
    and edges whose ``name`` attribute equals the key get each attribute
    key/value set on them.

    :param graph: The graph to use as a recipient of the namespace
    :return: the updated graph
    """
    for key, value in self.namespace.items():
        # Same substitution applied to vertices and edges. A lookup can
        # fail when the graph lacks a 'name' attribute or `value` is not a
        # mapping; keep the original best-effort behaviour and skip those
        # entries (but no longer swallow SystemExit/KeyboardInterrupt).
        for sequence in (graph.vs, graph.es):
            try:
                for element in sequence.select(name=key):
                    for attr_name, attr_value in value.items():
                        element[attr_name] = attr_value
            except Exception:
                pass
    return graph
def enable_audit_device(self, device_type, description=None, options=None, path=None):
    """Enable a new audit device at the supplied path.

    The path can be a single word name or a more complex, nested path.

    Supported methods:
        PUT: /sys/audit/{path}. Produces: 204 (empty body)

    :param device_type: Specifies the type of the audit device.
    :type device_type: str | unicode
    :param description: Human-friendly description of the audit device.
    :type description: str | unicode
    :param options: Configuration options to pass to the audit device
        itself; dependent on the audit device type.
    :type options: str | unicode
    :param path: Path in which to enable the audit device (part of the
        request URL). Defaults to the device type.
    :type path: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    mount_point = device_type if path is None else path
    payload = {
        'type': device_type,
        'description': description,
        'options': options,
    }
    return self._adapter.post(
        url='/v1/sys/audit/{path}'.format(path=mount_point),
        json=payload,
    )
def setimdi(self, node):  # OBSOLETE
    """OBSOLETE -- store IMDI metadata XML and extract common fields."""
    ns = {'imdi': 'http://www.mpi.nl/IMDI/Schema/IMDI'}
    self.metadatatype = MetaDataType.IMDI
    # Serialise the raw metadata; lxml supports pretty printing.
    if LXE:
        self.metadata = ElementTree.tostring(node, xml_declaration=False, pretty_print=True, encoding='utf-8')
    else:
        self.metadata = ElementTree.tostring(node, encoding='utf-8')
    # Each attribute is filled from the first non-empty xpath hit.
    field_queries = (
        ('_title', 'imdi:Session/imdi:Title'),
        ('_date', 'imdi:Session/imdi:Date'),
        ('_publisher', '//imdi:Source/imdi:Access/imdi:Publisher'),
        ('_license', '//imdi:Source/imdi:Access/imdi:Availability'),
        ('_language', '//imdi:Languages/imdi:Language/imdi:ID'),
    )
    for attr, query in field_queries:
        hits = node.xpath(query, namespaces=ns)
        if hits and hits[0].text:
            setattr(self, attr, hits[0].text)
def decode_token(self, token, key, algorithms=None, **kwargs):
    """Verify and decode a signed JWS/JWT using a JWK key.

    See https://tools.ietf.org/html/rfc7517 for the JWK data structure.

    :param str token: A signed JWS to be verified.
    :param str key: A key to attempt to verify the payload with.
    :param str, list algorithms: (optional) Valid algorithms that should
        be used to verify the JWS. Defaults to ``['RS256']``.
    :param kwargs: Extra keyword arguments forwarded to ``jwt.decode``,
        e.g. ``audience``, ``issuer``, ``subject``, ``access_token`` and
        ``options``.  When ``audience`` is absent or falsy, the client id
        configured on this instance is used instead.
    :return: The dict representation of the claims set, assuming the
        signature is valid and all requested data validation passes.
    :rtype: dict
    :raises jose.exceptions.JWTError: If the signature is invalid in any
        way.
    :raises jose.exceptions.ExpiredSignatureError: If the signature has
        expired.
    :raises jose.exceptions.JWTClaimsError: If any claim is invalid in
        any way.
    """
    audience = kwargs.pop('audience', None) or self._client_id
    algorithms = algorithms or ['RS256']
    return jwt.decode(token, key, audience=audience, algorithms=algorithms, **kwargs)
def set_monitor_timeout(timeout, power='ac', scheme=None):
    '''Set the monitor timeout in minutes for the given power scheme.

    Args:
        timeout (int):
            The amount of time in minutes before the monitor will timeout

        power (str):
            Set the value for AC or DC power. Default is ``ac``. Valid
            options are ``ac`` (AC Power) and ``dc`` (Battery).

        scheme (str):
            The scheme to use, leave as ``None`` to use the current.
            Default is ``None``. This can be the GUID or the Alias for
            the Scheme. Known Aliases are:

                - ``SCHEME_BALANCED`` - Balanced
                - ``SCHEME_MAX`` - Power saver
                - ``SCHEME_MIN`` - High performance

    Returns:
        bool: ``True`` if successful, otherwise ``False``

    CLI Example:

    .. code-block:: bash

        # Sets the monitor timeout to 30 minutes
        salt '*' powercfg.set_monitor_timeout 30
    '''
    # Delegate to the shared powercfg helper with the video-idle setting.
    return _set_powercfg_value(
        scheme=scheme,
        sub_group='SUB_VIDEO',
        setting_guid='VIDEOIDLE',
        power=power,
        value=timeout,
    )
def has_parent_group(self, group):
    """Return whether the object is a child of the :obj:`Group` ``group``.

    ``group`` may be given either as a group name (str) or as a group
    object; the appropriate lookup helper is selected accordingly.
    """
    lookup = (self._has_parent_group_by_name
              if isinstance(group, str)
              else self._has_parent_group_by_object)
    return lookup(group)
def update(self, resource, rid, updates):
    """Update the resource with id ``rid`` using the ``updates`` dict.

    The resource path is normalised to end with a slash before the id is
    appended, then the request is issued as a PUT.
    """
    if resource[-1] != '/':
        resource = resource + '/'
    return self.put(resource + str(rid), data=updates)
def _make_ntgrid(grid):
    """Make a named-tuple grid.

    [["",    "a b", "b c", "c d"],
     ["x y", 1,     2,     3],
     ["y z", 4,     5,     6],
     ["z z", 7,     8,     9]]

    will return

    ntcol(x_y=ntrow(a_b=1, b_c=2, c_d=3),
          y_z=ntrow(a_b=4, b_c=5, c_d=6),
          z_z=ntrow(a_b=7, b_c=8, c_d=9))
    """
    # Header row supplies column names; first cell of each later row is
    # its row name.  Names are de-spaced to be valid identifiers.
    col_names = [_nospace(h) for h in grid[0][1:]]
    row_names = [_nospace(row[0]) for row in grid[1:]]
    RowTuple = collections.namedtuple('ntrow', " ".join(col_names))
    ColTuple = collections.namedtuple('ntcol', " ".join(row_names))
    rows = [RowTuple(**dict(zip(col_names, row[1:]))) for row in grid[1:]]
    return ColTuple(**dict(zip(row_names, rows)))
def is_all_field_none(self):
    """Return True when every tracked field of this object is None.

    :rtype: bool
    """
    tracked = (self._id_, self._created, self._updated, self._attachment)
    return all(field is None for field in tracked)
def get_label(self, lang=None):
    """Return the label for the given language, or any default.

    When ``lang`` is None the first available label is returned.  When a
    language is requested but no label matches it, the last label seen is
    returned as a fallback (None when there are no labels at all).

    :param lang: Language to request
    :return: Label value
    :rtype: Literal
    """
    if lang is None:
        for candidate in self.graph.objects(self.asNode(), RDFS.label):
            return candidate
        return None
    fallback = None
    for candidate in self.graph.objects(self.asNode(), RDFS.label):
        fallback = candidate
        if candidate.language == lang:
            return candidate
    return fallback
def _confused_state(self, request: Request) -> Type[BaseState]:
    """If we're confused, decide which state class to transition to.

    The state recorded in the request register is used when it is both
    allowed and importable; otherwise the configured default state wins.
    """
    origin = request.register.get(Register.STATE)
    if origin not in self._allowed_states:
        return import_class(settings.DEFAULT_STATE)
    try:
        return import_class(origin)
    except (AttributeError, ImportError):
        # The allowed state could not be resolved; fall back to default.
        return import_class(settings.DEFAULT_STATE)
def output_file(filename, title="Bokeh Plot", mode="cdn", root_dir=None):
    '''Configure the default output state to generate output saved
    to a file when :func:`show` is called.

    Does not change the current ``Document`` from ``curdoc()``. File and
    notebook output may be active at the same time, so e.g., this does
    not clear the effects of ``output_notebook()``.

    Args:
        filename (str) : a filename for saving the HTML document

        title (str, optional) : a title for the HTML document
            (default: "Bokeh Plot")

        mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
            One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
            ``'absolute(-dev)'``. See :class:`bokeh.resources.Resources`
            for more details.

        root_dir (str, optional) : root directory to use for 'absolute'
            resources. (default: None) Ignored for other resource types,
            e.g. ``INLINE`` or ``CDN``.

    Returns:
        None

    .. note::
        Generally, this should be called at the beginning of an
        interactive session or the top of a script.

    .. warning::
        This output file will be overwritten on every save, e.g., each
        time show() or save() is invoked.
    '''
    state = curstate()
    state.output_file(filename, title=title, mode=mode, root_dir=root_dir)
def string_format(msg, method):
    """Format a string (upper, lower, formal, sentence).

    :param str msg: The user's message.
    :param str method: One of ``uppercase``, ``lowercase``,
        ``sentence`` or ``formal``.
    :return str: The reformatted string (None for an unknown method).
    """
    formatters = {
        "uppercase": str.upper,
        "lowercase": str.lower,
        "sentence": str.capitalize,
        "formal": string.capwords,
    }
    formatter = formatters.get(method)
    if formatter is not None:
        return formatter(msg)
def _deserialize ( self , value , attr , obj ) :
"""Deserialize value as a Unix timestamp ( in float seconds ) .
Handle both numeric and UTC isoformat strings .""" | if value is None :
return None
try :
return float ( value )
except ValueError :
parsed = parser . parse ( value )
if parsed . tzinfo :
if parsed . utcoffset ( ) . total_seconds ( ) :
raise ValidationError ( "Timestamps must be defined in UTC" )
parsed = parsed . replace ( tzinfo = None )
return ( parsed - TimestampField . EPOCH ) . total_seconds ( ) |
def Prod(a, axis, keep_dims):
    """Prod reduction op.

    Returns a 1-tuple holding the product of ``a`` over ``axis``; an
    ndarray axis spec is converted to a tuple of ints first, and
    ``keep_dims`` is forwarded to numpy.
    """
    reduce_axis = tuple(axis) if isinstance(axis, np.ndarray) else axis
    return (np.prod(a, axis=reduce_axis, keepdims=keep_dims),)
def convert2wavenumber(self):
    """Convert from wavelengths to wavenumber.

    Wavelengths are assumed to be in microns; the computed wavenumbers are
    in cm^-1 (1 / (1e-4 * microns)).  Every response array is reversed so
    the data stays ascending in wavenumber.  Afterwards the 'wavelength'
    entry is removed from each channel and ``self.unit`` is set to 'cm-1'.
    """
    for chname in self.rsr.keys():
        # Snapshot the keys first: the loop body inserts a 'wavenumber'
        # entry and must not iterate over it.
        elems = [k for k in self.rsr[chname].keys()]
        for sat in elems:
            if sat == "wavelength":
                LOG.debug("Get the wavenumber from the wavelength: sat=%s chname=%s", sat, chname)
                wnum = 1. / (1e-4 * self.rsr[chname][sat][:])
                # microns to cm
                self.rsr[chname]['wavenumber'] = wnum[::-1]
            else:
                # Reverse all response arrays (possibly nested one level)
                # to match the reversed wavenumber ordering above.
                if type(self.rsr[chname][sat]) is dict:
                    for name in self.rsr[chname][sat].keys():
                        resp = self.rsr[chname][sat][name][:]
                        self.rsr[chname][sat][name] = resp[::-1]
                else:
                    resp = self.rsr[chname][sat][:]
                    self.rsr[chname][sat] = resp[::-1]
    for chname in self.rsr.keys():
        # NOTE(review): raises KeyError if a channel lacks a 'wavelength'
        # entry — presumably every channel has one; confirm.
        del self.rsr[chname]['wavelength']
    self.unit = 'cm-1'
def read_xml(cls, url, markup, game, players):
    """Read inning XML documents for a game and build an Inning aggregate.

    :param url: contents url
    :param markup: markup provider
    :param game: MLBAM Game object
    :param players: MLBAM Players object
    :return: pitchpx.game.game.Game object
    """
    innings = Inning(game, players)
    base_url = "".join([url, cls.DIRECTORY])
    # hit location data
    hit_location = cls._read_hit_chart_data(MlbamUtil.find_xml('/'.join([base_url, cls.FILENAME_INNING_HIT]), markup))
    # create for atbat & pitch data
    for inning in MlbamUtil.find_xml_all(base_url, markup, cls.TAG, cls.FILENAME_PATTERN):
        soup = MlbamUtil.find_xml("/".join([base_url, inning.get_text().strip()]), markup)
        inning_number = int(soup.inning['num'])
        for inning_type in cls.INNINGS.keys():
            inning_soup = soup.inning.find(inning_type)
            if inning_soup is None:
                # NOTE(review): `break` also skips any remaining inning
                # types once one is missing — presumably intentional for
                # incomplete innings; confirm.
                break
            innings._inning_events(inning_soup, inning_number, cls.INNINGS[inning_type], hit_location)
            innings._inning_actions(inning_soup, inning_number, cls.INNINGS[inning_type])
    return innings
def send(self, url, data, headers):
    """Spawn an async request to a remote webserver.

    Fire-and-forget: the spawned greenthread's result is not awaited.

    NOTE(review): ``_send_payload`` is invoked with a single tuple
    argument ``(url, data, headers)`` rather than three positional
    arguments — confirm that matches its signature.
    """
    eventlet.spawn(self._send_payload, (url, data, headers))
def _dichFind(self, needle, currHaystack, offset, lst=None):
    """Dichotomic search for ``needle`` (bitwise-AND matching).

    If ``lst`` is None, returns the first position found, or -1.  If
    ``lst`` is a list, every match position is appended to it instead.

    NOTE(review): this code is Python 2 only — it uses ``xrange`` and
    relies on integer ``/`` division of lengths.
    """
    if len(currHaystack) == 1:
        # Leaf: candidate match at `offset`.  Cheap first/last-byte checks
        # (bitwise AND) before verifying the middle bytes.
        if (offset <= (len(self) - len(needle))) and (currHaystack[0] & needle[0]) > 0 and (self[offset + len(needle) - 1] & needle[-1]) > 0:
            found = True
            for i in xrange(1, len(needle) - 1):
                if self[offset + i] & needle[i] == 0:
                    found = False
                    break
            if found:
                if lst is not None:
                    lst.append(offset)
                else:
                    return offset
        else:
            if lst is None:
                return -1
    else:
        if (offset <= (len(self) - len(needle))):
            if lst is not None:
                # Collect-all mode: recurse into both halves.
                self._dichFind(needle, currHaystack[:len(currHaystack) / 2], offset, lst)
                self._dichFind(needle, currHaystack[len(currHaystack) / 2:], offset + len(currHaystack) / 2, lst)
            else:
                # First-match mode: the left half wins if it matches.
                v1 = self._dichFind(needle, currHaystack[:len(currHaystack) / 2], offset, lst)
                if v1 > -1:
                    return v1
                return self._dichFind(needle, currHaystack[len(currHaystack) / 2:], offset + len(currHaystack) / 2, lst)
    return -1
def multi_encode(self, message, masking_key=None, opcode=None, rsv1=0, rsv2=0, rsv3=0, max_payload=0):
    '''Encode a ``message`` into several frames depending on size.

    Returns a generator of bytes to be sent over the wire.'''
    max_payload = max(2, max_payload or self._max_payload)
    opcode, masking_key, data = self._info(message, opcode, masking_key)
    while data:
        chunk, data = data[:max_payload], data[max_payload:]
        # Only a frame strictly shorter than max_payload carries fin=1;
        # a message that is an exact multiple ends with a fin=0 frame.
        fin = 1 if len(chunk) < max_payload else 0
        yield self._encode(chunk, opcode, masking_key, fin, rsv1, rsv2, rsv3)
def _get_dicts_from_redis ( self , name , index_name , redis_prefix , item ) :
"""Retrieve the data of an item from redis and put it in an index and data dictionary to match the
common query interface .""" | r = self . _redis
data_dict = { }
data_index_dict = { }
if redis_prefix is None :
raise KeyError ( "redis_prefix is missing" )
if r . scard ( redis_prefix + index_name + str ( item ) ) > 0 :
data_index_dict [ str ( item ) ] = r . smembers ( redis_prefix + index_name + str ( item ) )
for i in data_index_dict [ item ] :
json_data = r . get ( redis_prefix + name + str ( int ( i ) ) )
data_dict [ i ] = self . _deserialize_data ( json_data )
return ( data_dict , data_index_dict )
raise KeyError ( "No Data found in Redis for " + item ) |
def query_keymap(self):
    """Return the logical keyboard state as a 32-item bit vector.

    Each bit set to 1 indicates that the corresponding key is currently
    pressed down.  List item N contains the bits for keys 8N to 8N+7,
    with the least significant bit representing key 8N.
    """
    return request.QueryKeymap(display=self.display).map
def check_row(state, index, missing_msg=None, expand_msg=None):
    """Zoom in on a particular row in the query result, by index.

    After zooming in on a row, which is represented as a single-row query
    result, you can use ``has_equal_value()`` to verify whether all
    columns in the zoomed-in solution query result have a match in the
    student query result.

    Args:
        index: index of the row to zoom in on (zero-based indexed).
        missing_msg: if specified, overrides the automatically generated
            feedback message in case the row is missing in the student
            query result.
        expand_msg: if specified, overrides the automatically generated
            feedback message prepended to messages thrown further in the
            SCT chain.
    """
    if missing_msg is None:
        missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look."
    if expand_msg is None:
        expand_msg = "Have another look at row {{index + 1}} in your query result. "
    msg_kwargs = {"index": index}
    # check that query returned something
    has_result(state)
    stu_res = state.student_result
    sol_res = state.solution_result
    # Row counts come from the first column of each result dict.
    n_sol = len(next(iter(sol_res.values())))
    n_stu = len(next(iter(stu_res.values())))
    if index >= n_sol:
        # NOTE(review): raising BaseException is unusual; a narrower
        # exception type would be kinder to callers — confirm before
        # changing, as existing code may catch it explicitly.
        raise BaseException("There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format(n_sol, index))
    if index >= n_stu:
        _msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
        state.do_test(_msg)
    # Zoom in: the child state holds single-row slices of both results.
    return state.to_child(append_message={"msg": expand_msg, "kwargs": msg_kwargs}, student_result={k: [v[index]] for k, v in stu_res.items()}, solution_result={k: [v[index]] for k, v in sol_res.items()}, )
def set_path(self, path, val):
    """Set the given value at the supplied path.

    ``path`` is either a tuple of strings or a string in ``A.B.C``
    format.  Intermediate attribute trees are traversed recursively.

    :raises Exception: when any path element cannot be safely escaped.
    """
    path = tuple(path.split('.')) if isinstance(path, str) else tuple(path)
    disallowed = [p for p in path if not type(self)._sanitizer.allowable(p)]
    # BUG FIX: `any(disallowed)` tested the *truthiness* of the offending
    # elements, so a falsy disallowed element (e.g. an empty string)
    # escaped detection; testing for list non-emptiness is what was meant.
    if disallowed:
        raise Exception("Attribute strings in path elements cannot be "
                        "correctly escaped : %s" % ','.join(repr(el) for el in disallowed))
    if len(path) > 1:
        attrtree = self.__getattr__(path[0])
        attrtree.set_path(path[1:], val)
    else:
        self.__setattr__(path[0], val)
def build_git_url ( self ) :
"""get build git url .
: return : build git url or None if not found""" | # pylint : disable = len - as - condition
if len ( self . dutinformation ) > 0 and ( self . dutinformation . get ( 0 ) . build is not None ) :
return self . dutinformation . get ( 0 ) . build . giturl
return None |
def titleize(word):
    """Capitalize all the words and replace some characters in the string
    to create a nicer looking title. :func:`titleize` is meant for
    creating pretty output.

    Examples::

        >>> titleize("man from the boondocks")
        "Man From The Boondocks"
        >>> titleize("x-men: the last stand")
        "X Men: The Last Stand"
        >>> titleize("TheManWithoutAPast")
        "The Man Without A Past"
        >>> titleize("raiders_of_the_lost_ark")
        "Raiders Of The Lost Ark"
    """
    def _capitalize_match(match):
        return match.group(1).capitalize()

    # Normalise via underscore/humanize first, then capitalize each word
    # boundary (allowing a leading apostrophe, e.g. "'tis").
    return re.sub(r"\b('?[a-z])", _capitalize_match, humanize(underscore(word)))
def affine(self, pixelbuffer=0):
    """Return an Affine object of tile.

    - pixelbuffer: tile buffer in pixels
    """
    buffered = self.bounds(pixelbuffer)
    return Affine(
        self.pixel_x_size, 0, buffered.left,
        0, -self.pixel_y_size, buffered.top,
    )
def register_schema(self, directory, path):
    """Register a json-schema.

    :param directory: root directory path.
    :param path: schema path, relative to the root directory.
    """
    # NOTE(review): only the absolute root directory is stored under the
    # schema's relative path key — presumably the full schema location is
    # resolved later by joining the two; confirm against the lookup side.
    self.schemas[path] = os.path.abspath(directory)
def description(self):
    '''Provide a description for each algorithm available, useful to
    print in the ecc file.'''
    if 0 < self.algo <= 3:
        template = "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) with generator=%s, prime poly=%s and first consecutive root=%s."
    elif self.algo == 4:
        template = "Reed-Solomon with polynomials in Galois field of characteristic %i (2^%i) under US FAA ADSB UAT RS FEC standard with generator=%s, prime poly=%s and first consecutive root=%s."
    else:
        return "No description for this ECC algorithm."
    return template % (self.field_charac, self.c_exp, self.gen_nb, hex(self.prim), self.fcr)
def delete(self):
    """Delete this table via the instance's table-admin client.

    For example:

    .. literalinclude:: snippets_table.py
        :start-after: [START bigtable_delete_table]
        :end-before: [END bigtable_delete_table]
    """
    admin_client = self._instance._client.table_admin_client
    admin_client.delete_table(name=self.name)
def pop(self, key, default=None):
    """Get an item from the dict and remove it.

    Returns ``default`` if the item is expired or missing.  Never raises
    KeyError.
    """
    with self.lock:
        try:
            # Bypass any expiry-aware __getitem__ override on the subclass.
            entry = OrderedDict.__getitem__(self, key)
            del self[key]
            return entry[0]
        except KeyError:
            return default
def flush(self):
    """Flush the write buffers of the stream if applicable and
    save the object on the cloud."""
    if self._writable:
        with self._seek_lock:
            buffer = self._get_buffer()
            # The buffer holds the bytes immediately preceding the current
            # seek position; compute the byte range it covers.
            end = self._seek
            start = end - len(buffer)
            # Clear buffer before the (possibly slow) upload so new writes
            # accumulate in a fresh buffer.
            self._write_buffer = bytearray()
            # Flush content, translating OS-level errors along the way.
            with handle_os_exceptions():
                self._flush(buffer, start, end)
def occurrence_view(request, event_pk, pk, template='swingtime/occurrence_detail.html', form_class=forms.SingleOccurrenceForm):
    '''View a specific occurrence and optionally handle any updates.

    Context parameters:

    ``occurrence``
        the occurrence object keyed by ``pk``

    ``form``
        a form object for updating the occurrence
    '''
    occurrence = get_object_or_404(Occurrence, pk=pk, event__pk=event_pk)
    if request.method != 'POST':
        form = form_class(instance=occurrence)
    else:
        form = form_class(request.POST, instance=occurrence)
        if form.is_valid():
            form.save()
            # Redirect back to the same page after a successful update.
            return http.HttpResponseRedirect(request.path)
    return render(request, template, {'occurrence': occurrence, 'form': form})
def get_tree(cls, session=None, json=False, json_fields=None, query=None):
    """Generate a tree of the current node table in dict or JSON format.

    You can make a custom query with the ``query`` attribute.  By default
    all nodes in the table are returned.

    Args:
        session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session

    Kwargs:
        json (bool): if True return JSON jqTree format
        json_fields (function): append custom fields in JSON
        query (function): takes a :class:`sqlalchemy.orm.query.Query`
            object as an argument, and returns it in a modified form::

                def query(nodes):
                    return nodes.filter(node.__class__.tree_id.is_(node.tree_id))

                node.get_tree(session=DBSession, json=True, query=query)
    """  # noqa
    tree = []
    # Maps node primary key -> its dict representation already placed in
    # the tree, so children can be attached in a single ordered pass.
    nodes_of_level = {}
    # handle custom query
    nodes = cls._base_query(session)
    if query:
        nodes = query(nodes)
    nodes = cls._base_order(nodes).all()
    # search minimal level of nodes.
    min_level = min([node.level for node in nodes] or [None])

    def get_node_id(node):
        return getattr(node, node.get_pk_name())

    for node in nodes:
        result = cls._node_to_dict(node, json, json_fields)
        parent_id = node.parent_id
        if node.level != min_level:  # for children
            # Find parent in the tree; orphans (parent filtered out by the
            # custom query) are skipped.
            if parent_id not in nodes_of_level.keys():
                continue
            if 'children' not in nodes_of_level[parent_id]:
                nodes_of_level[parent_id]['children'] = []
            # Append node to parent
            nl = nodes_of_level[parent_id]['children']
            nl.append(result)
            nodes_of_level[get_node_id(node)] = nl[-1]
        else:  # for top level nodes
            tree.append(result)
            nodes_of_level[get_node_id(node)] = tree[-1]
    return tree
def set(self, num):
    """Sets the current value equal to num.

    ``num`` is interpreted as an angle in degrees; the backing display
    variable is then refreshed from the formatted string representation.
    """
    self._value = coord.Angle(num, unit=u.deg)
    self._variable.set(self.as_string())
def set_options(self, **kwargs):
    """Set the options.  Existing values will persist.

    :param kwargs: option key/value pairs to merge into the current set
    :return:
    """
    merged = self.options
    merged.update(kwargs)
    self.update(options=merged)
def _send_http_request(self, xml_request):
    """Send a request via HTTP protocol.

    Args:
        xml_request -- A fully formed xml request string for the CPS.

    Returns:
        The raw xml response string.
    """
    headers = {"Host": self._host, "Content-Type": "text/xml", "Recipient": self._storage}
    try:
        # Retry once if failed in case the socket has just gone bad.
        self._connection.request("POST", self._selector_url, xml_request, headers)
        response = self._connection.getresponse()
    except (httplib.CannotSendRequest, httplib.BadStatusLine):
        # Stale keep-alive connection: reopen the socket and resend once;
        # a second failure propagates to the caller.
        Debug.warn("\nRestarting socket, resending message!")
        self._open_connection()
        self._connection.request("POST", self._selector_url, xml_request, headers)
        response = self._connection.getresponse()
    data = response.read()
    return data
def RybToRgb(hue):
    '''Maps a hue on Itten's RYB color wheel to the standard RGB wheel.

    Parameters:
        :hue:
            The hue on Itten's RYB color wheel [0...360]

    Returns:
        An approximation of the corresponding hue on the standard RGB
        wheel, obtained by linear interpolation between 15-degree wheel
        samples.

    >>> Color.RybToRgb(15)
    8.0
    '''
    # Interpolate within the 15-degree segment containing `hue`.
    # NOTE(review): hue == 360 indexes one past segment 24 — presumably
    # _RgbWheel carries a wrap-around entry; confirm.
    segment = int(hue / 15)
    remainder = hue % 15
    lo = _RgbWheel[segment]
    hi = _RgbWheel[segment + 1]
    return lo + (hi - lo) * remainder / 15
def SaveResourceUsage(self, status):
    """Method to tally resources.

    Accumulates the CPU time and network bytes reported in ``status``
    onto this flow, then aborts with ``FlowError`` when a configured CPU
    or network-bytes limit has been exceeded.
    """
    user_cpu = status.cpu_time_used.user_cpu_time
    system_cpu = status.cpu_time_used.system_cpu_time
    self.rdf_flow.cpu_time_used.user_cpu_time += user_cpu
    self.rdf_flow.cpu_time_used.system_cpu_time += system_cpu
    self.rdf_flow.network_bytes_sent += status.network_bytes_sent
    if self.rdf_flow.cpu_limit:
        user_cpu_total = self.rdf_flow.cpu_time_used.user_cpu_time
        system_cpu_total = self.rdf_flow.cpu_time_used.system_cpu_time
        if self.rdf_flow.cpu_limit < (user_cpu_total + system_cpu_total):
            # We have exceeded our limit, stop this flow.
            raise flow.FlowError("CPU limit exceeded for {} {}.".format(self.rdf_flow.flow_class_name, self.rdf_flow.flow_id))
    if (self.rdf_flow.network_bytes_limit and self.rdf_flow.network_bytes_limit < self.rdf_flow.network_bytes_sent):
        # We have exceeded our byte limit, stop this flow.
        raise flow.FlowError("Network bytes limit exceeded {} {}.".format(self.rdf_flow.flow_class_name, self.rdf_flow.flow_id))
def ToDebugString(self, indentation_level=1):
    """Converts the path filter scan tree node into a debug string.

    Args:
        indentation_level: an integer containing the text indentation
            level.

    Returns:
        A string containing a debug representation of the path filter
        scan tree node.
    """
    indentation = ' ' * indentation_level
    text_parts = ['{0:s}path segment index: {1:d}\n'.format(indentation, self.path_segment_index)]
    # Child entries are either nested scan tree nodes (recursed into with
    # one more indentation level) or terminal path strings.
    for path_segment, scan_object in self._path_segments.items():
        text_parts.append('{0:s}path segment: {1:s}\n'.format(indentation, path_segment))
        if isinstance(scan_object, PathFilterScanTreeNode):
            text_parts.append('{0:s}scan tree node:\n'.format(indentation))
            text_parts.append(scan_object.ToDebugString(indentation_level + 1))
        elif isinstance(scan_object, py2to3.STRING_TYPES):
            text_parts.append('{0:s}path: {1:s}\n'.format(indentation, scan_object))
    # The default value follows the same node-or-string convention.
    text_parts.append('{0:s}default value:\n'.format(indentation))
    if isinstance(self.default_value, PathFilterScanTreeNode):
        text_parts.append('{0:s}scan tree node:\n'.format(indentation))
        text_parts.append(self.default_value.ToDebugString(indentation_level + 1))
    elif isinstance(self.default_value, py2to3.STRING_TYPES):
        text_parts.append('{0:s}pattern: {1:s}\n'.format(indentation, self.default_value))
    text_parts.append('\n')
    return ''.join(text_parts)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.