signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def create_model(schema, collection, class_name=None):
    """Main entry point to creating a new mongothon model.

    Both a schema and a Pymongo collection object must be provided.
    Returns a new class which can be used as a model class. By default the
    class name is inferred from the collection name (camel-cased); pass
    ``class_name`` to override this.
    """
    name = class_name or camelize(str(collection.name))
    attrs = {
        'schema': schema,
        '_collection_factory': staticmethod(lambda: collection),
    }
    model_class = type(name, (Model,), attrs)
    # The class is created dynamically here, so point __module__ back at
    # the module from which `create_model` was called.
    model_class.__module__ = _module_name_from_previous_frame(1)
    return model_class
def progress(UserUser_presence=0):
    """PROGRESS Section 9.3.17

    Build a PROGRESS message packet; append a User-User header when
    ``UserUser_presence == 1``.

    :param UserUser_presence: 1 to include the optional UserUserHdr layer
    :return: composed scapy-style packet
    """
    a = TpPd(pd=0x3)
    b = MessageType(mesType=0x3)  # 0000011
    c = ProgressIndicator()
    packet = a / b / c
    # BUG FIX: was `is 1` (identity comparison with an int literal, which
    # only works by accident via CPython's small-int cache); use equality.
    if UserUser_presence == 1:
        d = UserUserHdr()
        packet = packet / d
    return packet
def can(self, permission, obj, **kwargs):
    """Check if we can do something with an object.

    :param permission: The permission to look for.
    :param obj: The object to check the ACL of.
    :param kwargs: The context to pass to predicates.

    >>> auth.can('read', some_object)
    >>> auth.can('write', another_object, group=some_group)
    """
    # Layer the evaluation context: current user first, then registered
    # context processors, then object context, then explicit overrides
    # (later layers win on key collisions).
    context = {'user': current_user}
    layers = [processor() for processor in self.context_processors]
    layers.append(get_object_context(obj))
    layers.append(kwargs)
    for layer in layers:
        context.update(layer)
    return check(permission, iter_object_acl(obj), **context)
def _compute_derivatives ( self ) :
"""Compute derivatives of the time series .""" | derivatives = [ ]
for i , ( timestamp , value ) in enumerate ( self . time_series_items ) :
if i > 0 :
pre_item = self . time_series_items [ i - 1 ]
pre_timestamp = pre_item [ 0 ]
pre_value = pre_item [ 1 ]
td = timestamp - pre_timestamp
derivative = ( value - pre_value ) / td if td != 0 else value - pre_value
derivative = abs ( derivative )
derivatives . append ( derivative )
# First timestamp is assigned the same derivative as the second timestamp .
if derivatives :
derivatives . insert ( 0 , derivatives [ 0 ] )
self . derivatives = derivatives |
def solveConsIndShock(solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rfree,
                      PermGroFac, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool):
    '''Solves a single period consumption-saving problem with CRRA utility and
    risky income (subject to permanent and transitory shocks). Can generate a
    value function if requested; the consumption function can be linear or
    cubic splines.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncomeDstn : [np.array]
        A list containing three arrays of floats: a discrete approximation to
        the income process between the period being solved and the one
        immediately following. Order: event probabilities, permanent shocks,
        transitory shocks.
    LivPrb : float
        Survival probability; likelihood of being alive at the beginning of
        the succeeding period.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : float
        Risk free interest factor on end-of-period assets.
    PermGroFac : float
        Expected permanent income growth factor at the end of this period.
    BoroCnstArt : float or None
        Artificial borrowing constraint on minimum end-of-period assets;
        None indicates no artificial constraint. Irrelevant when looser than
        the natural borrowing constraint.
    aXtraGrid : np.array
        Array of "extra" end-of-period asset values above the absolute
        minimum acceptable level.
    vFuncBool : boolean
        Whether the value function should be computed and reported.
    CubicBool : boolean
        Whether the solver should use cubic (vs linear) interpolation.

    Returns
    -------
    solution_now : ConsumerSolution
        The solution to the single period consumption-saving problem,
        including cFunc, vPfunc, mNrmMin, hNrm, MPCmin and MPCmax; possibly
        also vFunc and vPPfunc.
    '''
    # The basic solver suffices unless cubic splines or a value function is
    # requested; in that case use the "advanced" solver.
    if CubicBool or vFuncBool:
        solver_class = ConsIndShockSolver
    else:
        solver_class = ConsIndShockSolverBasic
    solver = solver_class(solution_next, IncomeDstn, LivPrb, DiscFac, CRRA,
                          Rfree, PermGroFac, BoroCnstArt, aXtraGrid,
                          vFuncBool, CubicBool)
    # Do some preparatory work, then solve the period.
    solver.prepareToSolve()
    return solver.solve()
def update_build_configuration_set(id, **kwargs):
    """Update a BuildConfigurationSet.

    Delegates to the raw endpoint and formats the response as JSON;
    returns None when the raw call yields nothing.
    """
    response = update_build_configuration_set_raw(id, **kwargs)
    return utils.format_json(response) if response else None
def get_public_events(self):
    """:calls: `GET /users/:user/events/public <http://developer.github.com/v3/activity/events>`_

    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
    """
    url = self.url + "/events/public"
    return github.PaginatedList.PaginatedList(
        github.Event.Event,
        self._requester,
        url,
        None,
    )
def get_template(template_dict, parameter_overrides=None):
    """Return a cleaned copy of a SAM template dictionary.

    SAM plugins are run and parameter values substituted before the
    resource metadata is normalized.

    Parameters
    ----------
    template_dict : dict
        Unprocessed SAM template dictionary (may be None/empty).
    parameter_overrides : dict
        Optional dictionary of values for template parameters.

    Returns
    -------
    dict
        Processed SAM template (empty dict for falsy input).
    """
    processed = template_dict or {}
    if processed:
        processed = SamTranslatorWrapper(processed).run_plugins()
        processed = SamBaseProvider._resolve_parameters(processed, parameter_overrides)
        ResourceMetadataNormalizer.normalize(processed)
    return processed
def _search_for_import_symbols ( self , matches ) :
'''Just encapsulating a search that takes place fairly often''' | # Sanity check
if not hasattr ( self . pefile_handle , 'DIRECTORY_ENTRY_IMPORT' ) :
return [ ]
# Find symbols that match
pattern = '|' . join ( re . escape ( match ) for match in matches )
exp = re . compile ( pattern )
symbol_list = [ ]
for module in self . pefile_handle . DIRECTORY_ENTRY_IMPORT :
for symbol in module . imports :
if ( symbol . name ) :
symbol_list . append ( symbol . name . lower ( ) )
symbol_matches = [ ]
for symbol in symbol_list :
if exp . search ( symbol ) :
symbol_matches . append ( symbol )
return symbol_matches |
def load(self, code, setup='', teardown=''):
    """Prepares a set of setup, test, and teardown code to be
    run in the console.

    PARAMETERS:
    code     -- list; processed lines of code. Elements in the list are
                either strings (input) or CodeAnswer objects (output)
    setup    -- str; raw setup code
    teardown -- str; raw teardown code
    """
    # Delegate the actual bookkeeping of code/setup/teardown to the parent.
    super().load(code, setup, teardown)
    # Reset the working frame to a fresh copy of the original so state from
    # a previous load does not leak into this run.
    self._frame = self._original_frame.copy()
def get_blank_row(self, filler="-", splitter="+"):
    """Build a pretty-formatted blank row (no meaningful data in it).

    :param filler: fill empty columns with this char
    :param splitter: separate columns with this char
    :return: pretty formatted blank row
    """
    # One empty cell per configured column width.
    blanks = [""] * len(self.widths)
    return self.get_pretty_row(blanks, filler, splitter)
def load_commodities(self):
    """Load the commodities for Amounts in this object."""
    def to_amount(value):
        # Re-parse as an Amount from a fixed-point string with the
        # currency attached; unwrap existing Amounts via to_double().
        numeric = value.to_double() if isinstance(value, Amount) else value
        return Amount("{0:.8f} {1}".format(numeric, self.currency))
    self.available = to_amount(self.available)
    self.total = to_amount(self.total)
def _split_multiline_prompt ( get_prompt_tokens ) :
"""Take a ` get _ prompt _ tokens ` function and return three new functions instead .
One that tells whether this prompt consists of multiple lines ; one that
returns the tokens to be shown on the lines above the input ; and another
one with the tokens to be shown at the first line of the input .""" | def has_before_tokens ( cli ) :
for token , char in get_prompt_tokens ( cli ) :
if '\n' in char :
return True
return False
def before ( cli ) :
result = [ ]
found_nl = False
for token , char in reversed ( explode_tokens ( get_prompt_tokens ( cli ) ) ) :
if found_nl :
result . insert ( 0 , ( token , char ) )
elif char == '\n' :
found_nl = True
return result
def first_input_line ( cli ) :
result = [ ]
for token , char in reversed ( explode_tokens ( get_prompt_tokens ( cli ) ) ) :
if char == '\n' :
break
else :
result . insert ( 0 , ( token , char ) )
return result
return has_before_tokens , before , first_input_line |
def get_multifile_object_child_location(self, parent_item_prefix: str, child_name: str) -> str:
    """Implementation of the parent abstract method.

    In this mode the attribute is a file inside the parent object folder.

    :param parent_item_prefix: the absolute file prefix of the parent item.
    :param child_name: the name of the child attribute.
    :return: the file prefix for this attribute
    """
    check_var(parent_item_prefix, var_types=str, var_name='parent_item_prefix')
    # BUG FIX: the validation previously reported the wrong variable name
    # ('item_name') in error messages for this parameter.
    check_var(child_name, var_types=str, var_name='child_name')
    # The parent prefix must be an existing folder in this (non-flat) mode.
    if not isdir(parent_item_prefix):
        raise ValueError('Cannot get attribute item in non-flat mode, parent item path is not a folder : '
                         + parent_item_prefix)
    return join(parent_item_prefix, child_name)
def value_counts(values, sort=True, ascending=False, normalize=False, bins=None, dropna=True):
    """Compute a histogram of the counts of non-null values.

    Parameters
    ----------
    values : ndarray (1-d)
    sort : boolean, default True
        Sort by values
    ascending : boolean, default False
        Sort in ascending order
    normalize : boolean, default False
        If True then compute a relative histogram
    bins : integer, optional
        Rather than count values, group them into half-open bins,
        convenience for pd.cut, only works with numeric data
    dropna : boolean, default True
        Don't include counts of NaN

    Returns
    -------
    value_counts : Series
    """
    # Deferred imports to avoid circular imports inside pandas.
    from pandas.core.series import Series, Index
    name = getattr(values, 'name', None)
    if bins is not None:
        # Binned path: cut() groups values into half-open intervals first.
        try:
            from pandas.core.reshape.tile import cut
            values = Series(values)
            ii = cut(values, bins, include_lowest=True)
        except TypeError:
            raise TypeError("bins argument only works with numeric data.")
        # count, remove nulls (from the index), and bucket the bins
        result = ii.value_counts(dropna=dropna)
        result = result[result.index.notna()]
        result.index = result.index.astype('interval')
        result = result.sort_index()
        # if we are dropna and we have NO values, return an empty Series
        if dropna and (result.values == 0).all():
            result = result.iloc[0:0]
        # normalizing is by len of all (regardless of dropna)
        counts = np.array([len(ii)])
    else:
        if is_extension_array_dtype(values) or is_sparse(values):
            # handle Categorical and sparse: delegate to the array's own
            # value_counts implementation
            result = Series(values)._values.value_counts(dropna=dropna)
            result.name = name
            counts = result.values
        else:
            # plain ndarray path
            keys, counts = _value_counts_arraylike(values, dropna)
            if not isinstance(keys, Index):
                keys = Index(keys)
            result = Series(counts, index=keys, name=name)
    if sort:
        result = result.sort_values(ascending=ascending)
    if normalize:
        # relative frequencies against the total observation count
        result = result / float(counts.sum())
    return result
def dict(self, **kwargs):
    """Dictionary representation.

    :param kwargs: extra key/value pairs merged into the result; a key that
        duplicates time/address/channel/value raises TypeError (dict() call
        semantics).
    :return: dict with time, address, channel and value plus any kwargs
    """
    # NOTE: the method name shadows the builtin `dict` in the class
    # namespace only; the call below still resolves to the builtin.
    return dict(time=self.timestamp, address=self.address, channel=self.channel, value=self.value, **kwargs)
def check_int(integer):
    """Check whether a string represents an optionally signed base-10 integer.

    :param integer: Number as str
    :return: True for strings like "123", "-5", "+7"; False otherwise
        (including non-str inputs and the empty string)
    """
    if not isinstance(integer, str):
        return False
    # BUG FIX: an empty string previously raised IndexError on integer[0].
    if not integer:
        return False
    if integer[0] in ('-', '+'):
        # Sign must be followed by at least one digit.
        return integer[1:].isdigit()
    return integer.isdigit()
def luks_cleartext_holder(self):
    """Get wrapper to the unlocked luks cleartext device.

    :return: the device whose cleartext slave is this device, or None when
        this device is not LUKS or no holder is found
    """
    if not self.is_luks:
        return None
    candidates = (dev for dev in self._daemon if dev.luks_cleartext_slave == self)
    return next(candidates, None)
def get_destinations(cls, domain, source):
    """Retrieve forward information.

    :param domain: domain whose forwards are listed
    :param source: source address to look up
    :return: list of destinations for the first matching forward, or []
    """
    page_opts = {'items_per_page': 500}
    for forward in cls.list(domain, page_opts):
        if forward['source'] == source:
            return forward['destinations']
    return []
def validate_packet(packet):
    """Check if packet is a valid OF packet.

    Raises:
        UnpackException: If the packet is invalid (wrong type, bad length,
            length field mismatch, or out-of-range version byte).
    """
    if not isinstance(packet, bytes):
        raise UnpackException('invalid packet')
    length = len(packet)
    # Valid OF packets are between 8 bytes and 2**16 bytes long.
    if not 8 <= length <= 2 ** 16:
        raise UnpackException('invalid packet')
    declared_length = int.from_bytes(packet[2:4], byteorder='big')
    if length != declared_length:
        raise UnpackException('invalid packet')
    # Version byte must be in 1..127.
    version = packet[0]
    if not 0 < version < 128:
        raise UnpackException('invalid packet')
def get_output_metadata(self, path, filename):
    """Describe a file by its metadata.

    :param path: on-disk path of the file
    :param filename: name to record in the metadata
    :return: dict with filename, filesize, md5 checksum and checksum type;
        includes 'metadata_only' when the instance is configured that way
    """
    md5sum = get_checksums(path, ['md5'])['md5sum']
    metadata = {
        'filename': filename,
        'filesize': os.path.getsize(path),
        'checksum': md5sum,
        'checksum_type': 'md5',
    }
    if self.metadata_only:
        metadata['metadata_only'] = True
    return metadata
def pop_choice(params: Dict[str, Any], key: str, choices: List[Any], default_to_first_choice: bool = False, history: str = "?.") -> Any:
    """Performs the same function as :func:`Params.pop_choice`, but is
    required in order to deal with places where a Params object is not
    welcome, such as inside Keras layers. See that method's docstring for
    detail on how this function works.

    The ``history`` parameter exists so :func:`Params.pop_choice` can be
    reproduced exactly when the history is known; the default "?." must be
    fixed up in the log to recover the logged parameters.
    """
    return Params(params, history).pop_choice(key, choices, default_to_first_choice)
def shadow_copy(self):
    """Return a copy of the resource with the same raw data.

    :return: new instance of the same class sharing _parsed_resource
    """
    clone = self.__class__()
    # Before copying, make sure the source is up to date.
    if not self._is_updated():
        self.update()
    clone._parsed_resource = self._parsed_resource
    return clone
def post(self, request, *args, **kwargs):
    """Method for handling POST requests.

    Checks for a modify confirmation and performs the action by calling
    `process_action`.
    """
    queryset = self.get_selected(request)
    # Without a 'modify' confirmation, just re-render the current page.
    if not request.POST.get('modify'):
        return self.render(request, redirect_url=request.build_absolute_uri())
    response = self.process_action(request, queryset)
    if response:
        return response
    return self.render(request, redirect_url=self.get_done_url())
def sg_aconv(tensor, opt):
    r"""Applies a 2-D atrous (or dilated) convolution.

    Args:
        tensor: A 4-D `Tensor` (automatically passed by decorator).
        opt:
            size: A tuple/list of positive integers of length 2 representing
                `[kernel height, kernel width]`. Can be an integer if both
                values are the same. If not specified, (3, 3) is set
                automatically.
            rate: A positive integer. The stride with which we sample input
                values across the `height` and `width` dimensions. Default is 2.
            in_dim: A positive `integer`. The size of input dimension.
            dim: A positive `integer`. The size of output dimension.
            pad: Either `SAME` (Default) or `VALID`.
            bias: Boolean. If True, biases are added.
            regularizer: A (Tensor -> Tensor or None) function; the result of
                applying it on a newly created variable will be added to the
                collection tf.GraphKeys.REGULARIZATION_LOSSES and can be used
                for regularization.
            summary: If True, summaries are added. The default is True.

    Returns:
        A `Tensor` with the same type as `tensor`.
    """
    # default options
    # NOTE(review): assumes tf.sg_opt's `+=` keeps caller-set fields and only
    # fills missing ones -- confirm against sg_opt's implementation.
    opt += tf.sg_opt(size=(3, 3), rate=2, pad='SAME')
    # Normalize a scalar kernel size to the [height, width] pair form.
    opt.size = opt.size if isinstance(opt.size, (tuple, list)) else [opt.size, opt.size]
    # parameter tf.sg_initializer
    w = tf.sg_initializer.he_uniform('W', (opt.size[0], opt.size[1], opt.in_dim, opt.dim), regularizer=opt.regularizer, summary=opt.summary)
    # Bias is 0 (the no-op addend below) when opt.bias is falsy.
    b = tf.sg_initializer.constant('b', opt.dim, summary=opt.summary) if opt.bias else 0
    # apply convolution
    out = tf.nn.atrous_conv2d(tensor, w, rate=opt.rate, padding=opt.pad) + b
    return out
def users_me_merge(self, data, **kwargs):
    """Merge the current user with another user.

    See https://developer.zendesk.com/rest_api/docs/core/users#merge-self-with-another-user
    """
    return self.call("/api/v2/users/me/merge.json", method="PUT", data=data, **kwargs)
def statistics(self):
    """Access the statistics.

    :returns: twilio.rest.taskrouter.v1.workspace.worker.worker_statistics.WorkerStatisticsList
    :rtype: twilio.rest.taskrouter.v1.workspace.worker.worker_statistics.WorkerStatisticsList
    """
    # Lazily construct and cache the statistics list on first access.
    if self._statistics is None:
        solution = self._solution
        self._statistics = WorkerStatisticsList(
            self._version,
            workspace_sid=solution['workspace_sid'],
            worker_sid=solution['sid'],
        )
    return self._statistics
def matplot(x, y, z, ax=None, colorbar=True, **kwargs):
    """Plot x, y, z with pcolormesh, using cell-centered coordinates and
    correct axis labels.

    The given x/y coordinates are treated as cell centers: the mesh handed
    to pcolormesh is shifted by half a cell in each direction so that each
    (x, y, z) triple lands in the middle of its cell.

    Parameters
    ----------
    x, y : 2-D arrays as produced by np.meshgrid (equidistant spacing)
    z : 2-D array of values to plot
    ax : existing matplotlib Axes, or None to create a new figure
    colorbar : if True, attach a colorbar to the plot
    kwargs : forwarded to Axes.pcolormesh

    Returns
    -------
    (ax, cbar) : the axes and the colorbar (None when colorbar=False)

    Note
    ----
    Only works for equidistant data at the moment.
    """
    xmin = x.min()
    xmax = x.max()
    dx = np.abs(x[0, 1] - x[0, 0])
    ymin = y.min()
    ymax = y.max()
    dy = np.abs(y[1, 0] - y[0, 0])
    # Shift the grid by half a cell so the input coordinates become centers.
    x2, y2 = np.meshgrid(
        np.arange(xmin, xmax + 2 * dx, dx) - dx / 2.,
        np.arange(ymin, ymax + 2 * dy, dy) - dy / 2.,
    )
    if not ax:
        fig, ax = plt.subplots()
    else:
        fig = ax.figure
    im = ax.pcolormesh(x2, y2, z, **kwargs)
    ax.axis([x2.min(), x2.max(), y2.min(), y2.max()])
    ax.set_xticks(np.arange(xmin, xmax + dx, dx))
    # BUG FIX: the y-tick upper bound previously used dx instead of dy,
    # producing wrong ticks whenever the x and y spacings differ.
    ax.set_yticks(np.arange(ymin, ymax + dy, dy))
    if colorbar:
        cbar = fig.colorbar(im, ax=ax)
    else:
        cbar = None
    return ax, cbar
def serialize(self, value, greedy=True):
    """Serialize a value to a column id.

    Greedy serialization requires the value to either be a column or
    convertible to a column, whereas non-greedy serialization will pass
    through any string as-is and will only serialize Column objects.
    Non-greedy serialization is useful when preparing queries with custom
    filters or segments.
    """
    if greedy and not isinstance(value, Column):
        # Attempt to coerce the raw value into a Column first.
        value = self.normalize(value)
    return value.id if isinstance(value, Column) else value
def process_request_body(fn):
    '''A decorator to skip a processor function if process_request_body is False.'''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        # Skip the processor entirely when body processing is disabled.
        if cherrypy.request.process_request_body is False:
            return
        fn(*args, **kwargs)
    return wrapped
def process_date_from_to_options(options, to_datetime=False, default_dt_to=False):
    """Resolve a (dt_from, dt_to) date range from parsed options.

    ``options`` may contain the shortcut flags 'last_week', 'last_day' or
    'last_2hours' (relative to now), or explicit ISO strings under the
    'from' / 'to' keys.

    :param options: dict of parsed options
    :param to_datetime: if True, coerce plain dates to datetimes (dt_to is
        pushed to the end of its day via date_to_datetime_lte)
    :param default_dt_to: if True and no upper bound was given, use a
        far-future default (year 2100) as dt_to
    :return: tuple (dt_from, dt_to); either element may be None
    """
    now = datetime.datetime.now()
    if options.get('last_week'):
        dt_from, dt_to = now - datetime.timedelta(days=7), now
    elif options.get('last_day'):
        dt_from, dt_to = now - datetime.timedelta(days=1), now
    elif options.get('last_2hours'):
        dt_from, dt_to = now - datetime.timedelta(hours=2), now
    else:
        dt_from = _parse_iso_option(options.get('from'))
        dt_to = _parse_iso_option(options.get('to'))
    if default_dt_to and not dt_to:
        dt_to = datetime.datetime(2100, 1, 1)
    if to_datetime:
        if isinstance(dt_from, datetime.date):
            dt_from = date_to_datetime(dt_from)
        if isinstance(dt_to, datetime.date):
            dt_to = date_to_datetime_lte(dt_to)
    return dt_from, dt_to


def _parse_iso_option(value):
    """Parse an ISO string as a datetime, falling back to a date; None if empty."""
    if not value:
        return None
    try:
        return iso_to_datetime(value)
    # BUG FIX: narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
    # are no longer swallowed by the fallback.
    except Exception:
        return iso_to_date(value)
def servicegroup_server_exists(sg_name, s_name, s_port=None, **connection_args):
    '''Check if a server:port combination is a member of a servicegroup.

    CLI Example:

    .. code-block:: bash

        salt '*' netscaler.servicegroup_server_exists 'serviceGroupName' 'serverName' 'serverPort'
    '''
    member = _servicegroup_get_server(sg_name, s_name, s_port, **connection_args)
    return member is not None
def throttle(self, wait):
    """Returns a function, that, when invoked, will only be triggered
    at most once during a given window of time.

    :param wait: throttle window in milliseconds
    :return: wrapped version of self.obj
    """
    # Shared mutable state for the closures below.
    ns = self.Namespace()
    ns.timeout = None      # pending Timer for a trailing invocation
    ns.throttling = None   # True while inside the throttle window
    ns.more = None         # a call arrived while throttled
    ns.result = None       # last result of the wrapped function
    def done():
        # Reopen the window once the debounced "done" fires.
        ns.more = ns.throttling = False
    whenDone = _.debounce(done, wait)
    # Convert milliseconds to seconds for the Timer below.
    wait = (float(wait) / float(1000))
    def throttled(*args, **kwargs):
        def later():
            ns.timeout = None
            if ns.more:
                # A call was suppressed during the window; run it now.
                self.obj(*args, **kwargs)
            whenDone()
        if not ns.timeout:
            ns.timeout = Timer(wait, later)
            ns.timeout.start()
        if ns.throttling:
            ns.more = True
        else:
            ns.throttling = True
            ns.result = self.obj(*args, **kwargs)
        whenDone()
        return ns.result
    return self._wrap(throttled)
def _convert_timedelta_to_seconds ( timedelta ) :
"""Returns the total seconds calculated from the supplied timedelta .
( Function provided to enable running on Python 2.6 which lacks timedelta . total _ seconds ( ) ) .""" | days_in_seconds = timedelta . days * 24 * 3600
return int ( ( timedelta . microseconds + ( timedelta . seconds + days_in_seconds ) * 10 ** 6 ) / 10 ** 6 ) |
def purge_queues(queues=None):
    """Purge the given queues (all registered queues when None)."""
    current_queues.purge(queues=queues)
    purged = queues or current_queues.queues.keys()
    click.secho('Queues {} have been purged.'.format(purged), fg='green')
def _pack3(obj, fp, **options):
    """Serialize a Python object into MessagePack bytes.

    Args:
        obj: a Python object
        fp: a .write()-supporting file-like object

    Kwargs:
        ext_handlers (dict): dictionary of Ext handlers, mapping a custom
                             type to a callable that packs an instance of
                             the type into an Ext object
        force_float_precision (str): "single" to force packing floats as
                                     IEEE-754 single-precision floats,
                                     "double" to force packing floats as
                                     IEEE-754 double-precision floats.

    Returns:
        None.

    Raises:
        UnsupportedType(PackException):
            Object type not supported for packing.

    Example:
        >>> f = open('test.bin', 'wb')
        >>> umsgpack.pack({u"compact": True, u"schema": 0}, f)
    """
    global compatibility
    ext_handlers = options.get("ext_handlers")
    # Dispatch order is significant: exact-class ext handlers come first so
    # they can override built-in handling, and bool must be tested before
    # int because bool is a subclass of int.
    if obj is None:
        _pack_nil(obj, fp, options)
    elif ext_handlers and obj.__class__ in ext_handlers:
        _pack_ext(ext_handlers[obj.__class__](obj), fp, options)
    elif isinstance(obj, bool):
        _pack_boolean(obj, fp, options)
    elif isinstance(obj, int):
        _pack_integer(obj, fp, options)
    elif isinstance(obj, float):
        _pack_float(obj, fp, options)
    # Compatibility mode packs both str and bytes as old-spec raw.
    elif compatibility and isinstance(obj, str):
        _pack_oldspec_raw(obj.encode('utf-8'), fp, options)
    elif compatibility and isinstance(obj, bytes):
        _pack_oldspec_raw(obj, fp, options)
    elif isinstance(obj, str):
        _pack_string(obj, fp, options)
    elif isinstance(obj, bytes):
        _pack_binary(obj, fp, options)
    elif isinstance(obj, (list, tuple)):
        _pack_array(obj, fp, options)
    elif isinstance(obj, dict):
        _pack_map(obj, fp, options)
    elif isinstance(obj, datetime.datetime):
        _pack_ext_timestamp(obj, fp, options)
    elif isinstance(obj, Ext):
        _pack_ext(obj, fp, options)
    elif ext_handlers:
        # Linear search for a handler registered on a superclass.
        t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None)
        if t:
            _pack_ext(ext_handlers[t](obj), fp, options)
        else:
            raise UnsupportedTypeException("unsupported type: %s" % str(type(obj)))
    else:
        raise UnsupportedTypeException("unsupported type: %s" % str(type(obj)))
def _on_loop_start ( self , variables ) :
"""performs on - loop - start actions like callbacks
variables contains local namespace variables .
Parameters
variables : dict of available variables
Returns
None""" | for callback in self . callbacks :
if hasattr ( callback , 'on_loop_start' ) :
self . logs_ [ str ( callback ) ] . append ( callback . on_loop_start ( ** variables ) ) |
def paste_as(self, key, data):
    """Paste and transform data.

    Data may be given as Python code as well as tab-separated multi-line
    strings similar to paste.
    """
    # NOTE: Python 2 syntax (`except Exception, err`); not valid Python 3.
    def error_msg(err):
        msg = _("Error evaluating data: ") + str(err)
        post_command_event(self.main_window, self.StatusBarMsg, text=msg)
    interfaces = self.main_window.interfaces
    # The incoming `key` is immediately replaced: paste always happens at
    # the current cursor position.
    key = self.main_window.grid.actions.cursor
    try:
        obj = ast.literal_eval(data)
    except (SyntaxError, AttributeError):
        # This is no Python code, so try to interpret it as paste data
        # (tab-separated cells, newline-separated rows).
        try:
            obj = [map(ast.literal_eval, line.split("\t")) for line in data.split("\n")]
        except Exception, err:
            # This must just be text.
            try:
                obj = [line.split('\t') for line in data.split('\n')]
            except Exception, err:
                # Now I really have no idea
                error_msg(err)
                return
    except ValueError, err:
        error_msg(err)
        return
    parameters = interfaces.get_pasteas_parameters_from_user(obj)
    if parameters is None:
        # Dialog aborted by the user.
        return
    paste_data = self._get_pasteas_data(parameters["dim"], obj)
    if parameters["transpose"]:
        paste_data = zip(*paste_data)
    self.main_window.grid.actions.paste(key, paste_data, freq=1000)
def percent_encode_plus(text, encode_set=QUERY_ENCODE_SET, encoding='utf-8'):
    '''Percent encode text for query strings.

    Unlike Python's ``quote_plus``, this function accepts a blacklist
    instead of a whitelist of safe characters.
    '''
    encoded = percent_encode(text, encode_set, encoding)
    if ' ' in text:
        # Spaces become '+' in query strings.
        encoded = encoded.replace(' ', '+')
    return encoded
def remove_invalid(self):
    """Remove entities which declare themselves invalid.

    Alters
    ------
    self.entities : shortened to entities whose ``is_valid`` is truthy
    """
    # BUG FIX: np.bool was a deprecated alias removed in NumPy 1.24; the
    # builtin bool is the correct dtype here.
    valid = np.array([entity.is_valid for entity in self.entities], dtype=bool)
    self.entities = self.entities[valid]
def create(self):
    """Create a new reminder via the API."""
    payload = {}
    endpoint = self._base_url + 'create'
    return self.send(url=endpoint, method='POST', json=payload)
def _generatePayload(self, query):
    """Build a request payload from the defaults plus the given query.

    Adds the following defaults to the payload:
    __rev, __user, __a, ttstamp, fb_dtsg, __req
    """
    payload = self._payload_default.copy()
    if query:
        payload.update(query)
    # __req is a base-36 running request counter; seq tracks the session.
    payload["__req"] = str_base(self._req_counter, 36)
    payload["seq"] = self._seq
    self._req_counter += 1
    return payload
def delete_from_matching_blacklist(db, entity):
    """Remove a blacklisted entity from the registry.

    The entity is looked up first; when it is found it is removed,
    otherwise a 'NotFoundError' is raised.

    :param db: database manager
    :param entity: blacklisted entity to remove
    :raises NotFoundError: raised when the blacklisted entity does not
        exist in the registry.
    """
    with db.connect() as session:
        query = session.query(MatchingBlacklist)
        blacklisted = query.filter(MatchingBlacklist.excluded == entity).first()
        if not blacklisted:
            raise NotFoundError(entity=entity)
        delete_from_matching_blacklist_db(session, blacklisted)
def not_equal(lhs, rhs):
    """Returns the result of element-wise **not equal to** (!=) comparison
    operation with broadcasting.

    For each element in the input arrays, return 1 (true) if the
    corresponding elements differ, otherwise return 0 (false). Equivalent
    to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.

    .. note::
        If the corresponding dimensions of the two arrays have the same
        size or one of them has size 1, then the arrays are broadcastable
        to a common shape.

    Parameters
    ----------
    lhs : scalar or mxnet.ndarray.array
        First array to be compared.
    rhs : scalar or mxnet.ndarray.array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of boolean values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x != y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> mx.nd.not_equal(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable=no-member, protected-access
    def scalar_not_equal(x, y):
        # Fallback used when both operands are Python scalars.
        return 1 if x != y else 0
    return _ufunc_helper(lhs, rhs, op.broadcast_not_equal, scalar_not_equal,
                         _internal._not_equal_scalar, None)
def second_params_two(test, two):
    """Second resource.

    * POST: return [test, two, request data]
    * GET: return [test, two]
    """
    if request.method != 'POST':
        return {'result': [test, two]}
    return [test, two, request.data]
def is_solid(regex):
    """Check whether the given regular expression is a single "solid" unit.

    A regex is solid when it consists of one atom (single char, one character
    class, or one group), optionally followed by a quantifier.

    >>> is_solid(r'a')
    True
    >>> is_solid(r'[ab]')
    True
    >>> is_solid(r'(a|b|c)')
    True
    >>> is_solid(r'(a|b|c)?')
    True
    >>> is_solid(r'(a|b)(c)')
    False
    >>> is_solid(r'(a|b)(c)?')
    False
    """
    # Collapse every escaped char / ordinary char into '#', keeping only the
    # structural metacharacters, then strip the '#'s to get the bare skeleton.
    shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
    skeleton = shape.replace('#', '')
    if len(shape) <= 1:
        # A single atom (or empty pattern) is trivially solid.
        return True
    return bool(
        re.match(r'^\[[^\]]*\][\*\+\?]?$', shape)
        or re.match(r'^\([^\(]*\)[\*\+\?]?$', shape)
        or re.match(r'^\(\)#*?\)\)', skeleton)
    )
def fix_bam_header(job, bamfile, sample_type, univ_options, samtools_options, retained_chroms=None):
    """Fix the bam header to remove the command line call. Failing to do this causes Picard to
    reject the bam.

    :param dict bamfile: The input bam file
    :param str sample_type: Description of the sample to inject into the filename
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict samtools_options: Options specific to samtools
    :param list retained_chroms: A list of chromosomes to retain
    :return: fsID for the output bam
    :rtype: toil.fileStore.FileID
    """
    if retained_chroms is None:
        retained_chroms = []
    work_dir = os.getcwd()
    input_files = {sample_type + '.bam': bamfile}
    # Pull the bam out of the file store into the working directory (docker-visible paths).
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
    # Dump the current header with `samtools view -H`.
    parameters = ['view', '-H', input_files[sample_type + '.bam']]
    with open('/'.join([work_dir, sample_type + '_input_bam.header']), 'w') as headerfile:
        docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=headerfile,
                    tool_version=samtools_options['version'])
    # Rewrite the header: strip CL (command line) fields from @PG lines and, if requested,
    # drop @SQ lines for chromosomes not in retained_chroms.
    with open(headerfile.name, 'r') as headerfile, \
            open('/'.join([work_dir, sample_type + '_output_bam.header']), 'w') as outheaderfile:
        for line in headerfile:
            if line.startswith('@PG'):
                # Drop the CL:... token; Picard rejects headers containing it.
                line = '\t'.join([x for x in line.strip().split('\t') if not x.startswith('CL')])
            if retained_chroms and line.startswith('@SQ'):
                chrom = line.strip().split()[1]
                # Remove the 'SN:' prefix.  NB: the previous lstrip('SN:') stripped
                # *characters* from the set {S, N, :} and so mangled contig names
                # beginning with S or N (e.g. 'SN:NC_000001' -> 'C_000001').
                if chrom.startswith('SN:'):
                    chrom = chrom[len('SN:'):]
                if chrom not in retained_chroms:
                    continue
            print(line.strip(), file=outheaderfile)
    # Reheader the bam with the cleaned-up header.
    parameters = ['reheader', docker_path(outheaderfile.name), input_files[sample_type + '.bam']]
    with open('/'.join([work_dir, sample_type + '_fixPG.bam']), 'w') as fixpg_bamfile:
        docker_call(tool='samtools', tool_parameters=parameters, work_dir=work_dir,
                    dockerhub=univ_options['dockerhub'], outfile=fixpg_bamfile,
                    tool_version=samtools_options['version'])
    output_file = job.fileStore.writeGlobalFile(fixpg_bamfile.name)
    # The old bam file is now useless.
    job.fileStore.deleteGlobalFile(bamfile)
    job.fileStore.logToMaster('Ran reheader on %s:%s successfully' % (univ_options['patient'], sample_type))
    return output_file
def parse(self, contents):
    """Parse the document.

    Splits *contents* on ``self.delim``, skips empty segments, and yields one
    dict per non-empty segment containing the whitespace-normalized text, its
    tokens, per-token character offsets, and empty placeholder annotations
    (POS/NER/lemma/dependency fields).

    :param contents: The text contents of the document.
    :rtype: a *generator* of tokenized text.
    """
    # NOTE: a dead counter variable (`i`) was removed; it was incremented but
    # never read.
    for text in contents.split(self.delim):
        if not len(text.strip()):
            continue
        words = text.split()
        # Offset of each word within the normalized, single-spaced text:
        # cumulative sum of (word length + 1 separating space), shifted by one.
        char_offsets = [0] + [int(_) for _ in np.cumsum([len(x) + 1 for x in words])[:-1]]
        text = " ".join(words)
        yield {
            "text": text,
            "words": words,
            "pos_tags": [""] * len(words),
            "ner_tags": [""] * len(words),
            "lemmas": [""] * len(words),
            "dep_parents": [0] * len(words),
            "dep_labels": [""] * len(words),
            "char_offsets": char_offsets,
            "abs_char_offsets": char_offsets,
        }
def debug(self, value):
    """Turn on debug logging if necessary.

    :param value: Value of debug flag
    """
    self._debug = value
    if not self._debug:
        return
    # Enable debug-level logging globally via the root logger.
    logging.getLogger().setLevel(logging.DEBUG)
def main():
    """Budou entry point for the command line tool."""
    options = docopt(__doc__)
    if options['--version']:
        print(__version__)
        sys.exit()
    segmented = parse(options['<source>'],
                      segmenter=options['--segmenter'],
                      language=options['--language'],
                      classname=options['--classname'])
    print(segmented['html_code'])
    sys.exit()
def index(self, row, column, parent=None):
    """Return the index of the item in the model specified by the given row,
    column and parent index.

    :param row: the row of the item
    :type row: int
    :param column: the column for the item
    :type column: int
    :param parent: the parent index
    :type parent: :class:`QtCore.QModelIndex`:
    :returns: the index of the item
    :rtype: :class:`QtCore.QModelIndex`
    :raises: None
    """
    parent = QtCore.QModelIndex() if parent is None else parent
    if not self.hasIndex(row, column, parent):
        return QtCore.QModelIndex()
    # An invalid parent means we are indexing into the (hidden) root item.
    node = parent.internalPointer() if parent.isValid() else self._root
    try:
        return self.createIndex(row, column, node.child(row))
    except IndexError:
        # Row out of range for this parent.
        return QtCore.QModelIndex()
def mzn2fzn(mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
            globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
            keep=False, output_vars=None, output_base=None, output_mode='item',
            no_ozn=False):
    """Flatten a MiniZinc model into a FlatZinc one.

    This function is equivalent to the command ``minizinc --compile``.

    Parameters
    ----------
    mzn : str
        The minizinc model. This can be either the path to the ``.mzn`` file or
        the content of the model itself.
    *dzn_files
        A list of paths to dzn files to attach to the minizinc execution,
        provided as positional arguments; by default no data file is attached.
    args : dict
        Arguments for the template engine.
    data : dict
        Additional data as a dictionary of variables assignments to supply to
        the minizinc executable. The dictionary is automatically converted to
        dzn format by the ``pymzn.dict2dzn`` function.
    include : str or list
        One or more additional paths to search for included ``.mzn`` files.
    stdlib_dir : str
        The path to the MiniZinc standard library. Provide it only if it is
        different from the default one.
    globals_dir : str
        The path to the MiniZinc globals directory. Provide it only if it is
        different from the default one.
    declare_enums : bool
        Whether to declare enum types when converting inline data into dzn
        format. If the enum types are declared elsewhere this option should be
        False. Default is ``True``.
    allow_multiple_assignments : bool
        Whether to allow multiple assignments of variables. Sometimes is
        convenient to simply let the data file override the value already
        assigned in the minizinc file. Default is ``False``.
    keep : bool
        Whether to keep the generated ``.mzn``, ``.dzn``, ``.fzn`` and ``.ozn``
        files or not. If False, the generated files are created as temporary
        files which will be deleted right after the problem is solved. Though
        files generated by PyMzn are not intended to be kept, this property can
        be used for debugging purpose. Note that in case of error the files are
        not deleted even if this parameter is ``False``. Default is ``False``.
    output_vars : list of str
        A list of output variables. These variables will be the ones included in
        the output dictionary. Only available if ``output_mode='dict'``.
    output_base : str
        Output directory for the files generated by PyMzn. The default
        (``None``) is the temporary directory of your OS (if ``keep=False``) or
        the current working directory (if ``keep=True``).
    output_mode : {'dict', 'item', 'dzn', 'json', 'raw'}
        The desired output format. The default is ``'item'``, which outputs a
        stream of strings as returned by the ``solns2out`` tool, formatted
        according to the output statement of the MiniZinc model. The ``'dict'``
        format returns a stream of solutions decoded as python dictionaries.
        The ``'dzn'`` and ``'json'`` formats output a stream of strings
        formatted in dzn and json respectively. The ``'raw'`` format, instead,
        returns the whole solution stream, without parsing.
    no_ozn : bool
        If ``True``, the ozn file is not produced, ``False`` otherwise.

    Returns
    -------
    tuple (str, str)
        The paths to the generated fzn and ozn files. If ``no_ozn=True``, the
        second element is ``None``.
    """
    # Resolve template args, inline data and file paths into concrete files.
    # NOTE(review): `types` returned by the preliminaries is not used here.
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = _minizinc_preliminaries(
        mzn, *dzn_files, args=args, data=data, include=include, stdlib_dir=stdlib_dir,
        globals_dir=globals_dir, output_vars=output_vars, keep=keep, output_base=output_base,
        output_mode=output_mode, declare_enums=declare_enums,
        allow_multiple_assignments=allow_multiple_assignments)
    # Build the command line for `minizinc --compile`.
    args = ['--compile']
    args += _flattening_args(
        mzn_file, *dzn_files, data=data, stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=output_mode, include=include, no_ozn=no_ozn, output_base=output_base,
        allow_multiple_assignments=allow_multiple_assignments)
    t0 = _time()
    _run_minizinc(*args)
    flattening_time = _time() - t0
    logger.info('Flattening completed in {:>3.2f} sec'.format(flattening_time))
    if not keep:
        # Best-effort cleanup of the temporary data file.
        with contextlib.suppress(FileNotFoundError):
            if data_file:
                os.remove(data_file)
                logger.info('Deleted file: {}'.format(data_file))
    # The compiler writes <base>.fzn / <base>.ozn next to the model (or under
    # output_base when given); report None for files that were not produced.
    if output_base:
        mzn_base = output_base
    else:
        mzn_base = os.path.splitext(mzn_file)[0]
    fzn_file = '.'.join([mzn_base, 'fzn'])
    fzn_file = fzn_file if os.path.isfile(fzn_file) else None
    ozn_file = '.'.join([mzn_base, 'ozn'])
    ozn_file = ozn_file if os.path.isfile(ozn_file) else None
    if fzn_file:
        logger.info('Generated file: {}'.format(fzn_file))
    if ozn_file:
        logger.info('Generated file: {}'.format(ozn_file))
    return fzn_file, ozn_file
def _get_acceptable_response_type():
    """Return the mimetype for this request."""
    # Missing or wildcard Accept header: default to JSON.
    if ('Accept' not in request.headers
            or request.headers['Accept'] in ALL_CONTENT_TYPES):
        return JSON
    offered = set(request.headers['ACCEPT'].strip().split(','))
    if offered & HTML_CONTENT_TYPES:
        return HTML
    if offered & JSON_CONTENT_TYPES:
        return JSON
    # Nothing we can produce: HTTP 406 Not Acceptable.
    raise InvalidAPIUsage(406)
def is_logon(self, verify=False):
    """Return a boolean indicating whether the session is currently logged on
    to the HMC.

    By default, having a session-id set is considered sufficient evidence of
    being logged on. With `verify`, the session-id is additionally validated
    by issuing a dummy operation ("Get Console Properties") to the HMC.

    Parameters:

      verify (bool): If a session-id is already set, verify its validity.
    """
    if self._session_id is None:
        return False
    if not verify:
        return True
    try:
        self.get('/api/console', logon_required=True)
    except ServerAuthError:
        # The HMC rejected the session-id, so it is stale.
        return False
    return True
def memory_usage(self, index=True, deep=False):
    """Returns the memory usage of each column in bytes.

    Args:
        index (bool): Whether to include the memory usage of the DataFrame's
            index in returned Series. Defaults to True
        deep (bool): If True, introspect the data deeply by interrogating
            objects dtypes for system-level memory consumption. Defaults to False

    Returns:
        A Series where the index are the column names and the values are
        the memory usage of each of the columns in bytes. If `index=true`,
        then the first value of the Series will be 'Index' with its memory usage.
    """
    # Index handling is the child class's job; getting here with index=True
    # is an internal error.
    assert not index, "Internal Error. Index must be evaluated in child class"
    usage = self._query_compiler.memory_usage(index=index, deep=deep)
    return self._reduce_dimension(usage)
def get_station_and_time(wxdata: [str]) -> ([str], str, str):  # type: ignore
    """Returns the report list and removed station ident and time strings.

    The first token is taken as the station ident. If the following token
    looks like a report timestamp (``ddhhmmZ`` or a bare 6-digit ``ddhhmm``),
    it is removed and returned normalized to end in ``'Z'``.

    :param wxdata: tokenized report; consumed in place
    :return: (remaining tokens, station ident, report time or '')
    """
    # Guard against empty input: the original unconditionally indexed
    # wxdata[0] after the pop, raising IndexError for station-only reports.
    if not wxdata:
        return wxdata, '', ''
    station = wxdata.pop(0)
    qtime = wxdata[0] if wxdata else ''
    if qtime.endswith('Z') and qtime[:-1].isdigit():
        # Already a Zulu timestamp, e.g. '120853Z'.
        rtime = wxdata.pop(0)
    elif len(qtime) == 6 and qtime.isdigit():
        # Bare 6-digit timestamp; append the missing 'Z'.
        rtime = wxdata.pop(0) + 'Z'
    else:
        rtime = ''
    return wxdata, station, rtime
def bind_to_instance(self, instance):
    """Bind a ResourceApi instance to an operation.

    Records *instance* as the binding target and registers it as middleware.
    """
    self.middleware.append(instance)
    self.binding = instance
def png(out, metadata, f):
    """Convert to PNG format.

    `metadata` should be a Plan9 5-tuple;
    `f` the input file (see :meth:`pixmeta`).
    """
    import png
    rows, info = pixmeta(metadata, f)
    writer = png.Writer(**info)
    writer.write(out, rows)
def vcenter_id ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
vcenter = ET . SubElement ( config , "vcenter" , xmlns = "urn:brocade.com:mgmt:brocade-vswitch" )
id = ET . SubElement ( vcenter , "id" )
id . text = kwargs . pop ( 'id' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def get_assessment_part_form(self, *args, **kwargs):
    """Pass through to provider AssessmentPartAdminSession.get_assessment_part_form_for_update"""
    # This method might be a bit sketchy. Time will tell.
    # A trailing list argument (record types) or the explicit kwarg means the
    # caller wants a *create* form; otherwise it's an *update* form.
    wants_create = isinstance(args[-1], list) or 'assessment_part_record_types' in kwargs
    if wants_create:
        return self.get_assessment_part_form_for_create(*args, **kwargs)
    return self.get_assessment_part_form_for_update(*args, **kwargs)
def getBinding(self):
    """Return the Binding object that is referenced by this port."""
    definitions = self.getService().getWSDL()
    return definitions.bindings[self.binding]
def order_limit(self, timeInForce=TIME_IN_FORCE_GTC, **params):
    """Send in a new limit order.

    Any order with an icebergQty MUST have timeInForce set to GTC.

    :param symbol: required
    :type symbol: str
    :param side: required
    :type side: str
    :param quantity: required
    :type quantity: decimal
    :param price: required
    :type price: str
    :param timeInForce: default Good till cancelled
    :type timeInForce: str
    :param newClientOrderId: A unique id for the order. Automatically generated if not sent.
    :type newClientOrderId: str
    :param icebergQty: Used with LIMIT, STOP_LOSS_LIMIT, and TAKE_PROFIT_LIMIT to create an iceberg order.
    :type icebergQty: decimal
    :param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
    :type newOrderRespType: str
    :param recvWindow: the number of milliseconds the request is valid for
    :type recvWindow: int
    :returns: API response

    See order endpoint for full response options

    :raises: BinanceRequestException, BinanceAPIException, BinanceOrderException,
        BinanceOrderMinAmountException, BinanceOrderMinPriceException,
        BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException,
        BinanceOrderInactiveSymbolException
    """
    # Force the limit order type and the requested time-in-force, then
    # delegate to the generic order endpoint.
    params['type'] = self.ORDER_TYPE_LIMIT
    params['timeInForce'] = timeInForce
    return self.create_order(**params)
def p_val(p):
    """bexpr : VAL bexpr %prec UMINUS"""
    # NOTE: the docstring above is a PLY grammar rule, not documentation —
    # it must not be reworded.

    # Compile-time evaluator used when VAL is applied to a constant string:
    # mirrors BASIC's VAL(), yielding 0 (with a warning) for non-numeric input.
    def val(s):
        try:
            x = float(s)
        except:
            x = 0
            warning(p.lineno(1), "Invalid string numeric constant '%s' evaluated as 0" % s)
        return x

    if p[2].type_ != TYPE.string:
        # VAL requires a string operand; report the error and emit no node.
        api.errmsg.syntax_error_expected_string(p.lineno(1), TYPE.to_string(p[2].type_))
        p[0] = None
    else:
        # Build the VAL builtin node; `val` lets constant arguments be folded.
        p[0] = make_builtin(p.lineno(1), 'VAL', p[2], lambda x: val(x), type_=TYPE.float_)
def _get(url, profile):
    '''Get a specific dashboard.'''
    response = requests.get(
        "{0}/api/dashboards/{1}".format(profile.get('grafana_url'), url),
        headers={
            "Accept": "application/json",
            "Authorization": "Bearer {0}".format(profile.get('grafana_token')),
        },
        timeout=profile.get('grafana_timeout', 3),
    )
    payload = response.json()
    # Grafana reports missing dashboards via a message field.
    if payload.get('message') == 'Not found':
        return None
    return payload.get('dashboard')
def winning_name(self):
    """Returns a ``string`` of the winning team's name, such as 'Purdue
    Boilermakers'.
    """
    name = self._home_name if self.winner == HOME else self._away_name
    # Plain names are returned as-is; anchor elements (containing a
    # 'cbb/schools' href) need their text extracted.
    if 'cbb/schools' not in str(name):
        return str(name)
    return name.text()
def handle(self, pkt, raddress, rport):
    """Handle the packet in response to an ACK, which should be a DAT."""
    if isinstance(pkt, TftpPacketDAT):
        return self.handleDat(pkt)
    # Anything other than a DAT is a protocol violation at this point:
    # tell the peer and abort the transfer.
    if isinstance(pkt, TftpPacketACK):
        complaint = "Received ACK from peer when expecting DAT"
    elif isinstance(pkt, TftpPacketWRQ):
        complaint = "Received WRQ from peer when expecting DAT"
    elif isinstance(pkt, TftpPacketERR):
        complaint = "Received ERR from peer: " + str(pkt)
    else:
        complaint = "Received unknown packet type from peer: " + str(pkt)
    self.sendError(TftpErrors.IllegalTftpOp)
    raise TftpException(complaint)
def set_metadata(self, set_id, fp):
    """Set the XML metadata on a set.

    :param file fp: file-like object to read the XML metadata from.
    """
    target_url = self.client.get_url('SET', 'GET', 'single', {'id': set_id})
    self._metadata.set(target_url, fp)
def resend_presence(self):
    """Re-send the currently configured presence.

    :return: Stanza token of the presence stanza or :data:`None` if the
        stream is not established.
    :rtype: :class:`~.stream.StanzaToken`

    .. note::

       :meth:`set_presence` automatically broadcasts the new presence if
       any of the parameters changed.
    """
    if not self.client.established:
        return None
    return self.client.enqueue(self.make_stanza())
def get_urls(self):
    """Add ``layout_placeholder_data`` URL."""
    # See: `fluent_pages.pagetypes.fluentpage.admin.FluentPageAdmin`.
    extra_urls = patterns(
        '',
        url(r'^placeholder_data/(?P<id>\d+)/$',
            self.admin_site.admin_view(self.placeholder_data_view),
            name='layout_placeholder_data'),
    )
    # Our URL must come first so it is matched before the default admin ones.
    return extra_urls + super(LayoutAdmin, self).get_urls()
def build_unique_fragments(self):
    """Find all possible fragment combinations of the MoleculeGraphs (in other
    words, all connected induced subgraphs).

    :return: list of unique MoleculeGraph fragments
    """
    self.set_node_attributes()
    graph = self.graph.to_undirected()
    # Two fragments are identical when their graphs are isomorphic with
    # matching atomic species on the nodes.
    nm = iso.categorical_node_match("specie", "ERROR")
    # Find all possible fragments, aka connected induced subgraphs.
    all_fragments = []
    for ii in range(1, len(self.molecule)):
        for combination in combinations(graph.nodes, ii):
            subgraph = nx.subgraph(graph, combination)
            if nx.is_connected(subgraph):
                all_fragments.append(subgraph)
    # Narrow to unique fragments using graph isomorphism. any() short-circuits
    # on the first isomorphic match instead of materializing a full list of
    # comparisons and counting (as the previous implementation did).
    unique_fragments = []
    for fragment in all_fragments:
        if not any(nx.is_isomorphic(fragment, f, node_match=nm) for f in unique_fragments):
            unique_fragments.append(fragment)
    # Convert back to molecule graphs, relabeling nodes to a contiguous
    # 0..n-1 range as Molecule expects.
    unique_mol_graphs = []
    for fragment in unique_fragments:
        mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}
        remapped = nx.relabel_nodes(fragment, mapping)
        species = nx.get_node_attributes(remapped, "specie")
        coords = nx.get_node_attributes(remapped, "coords")
        edges = {}
        for from_index, to_index, key in remapped.edges:
            edge_props = fragment.get_edge_data(from_index, to_index, key=key)
            edges[(from_index, to_index)] = edge_props
        unique_mol_graphs.append(
            self.with_edges(
                Molecule(species=species, coords=coords, charge=self.molecule.charge),
                edges,
            )
        )
    return unique_mol_graphs
def request_permission(cls, permissions):
    """Requests permission and returns an future result that returns a
    boolean indicating if all the given permission were granted or denied.
    """
    app = AndroidApplication.instance()
    future = app.create_future()

    def on_result(results):
        # Fold the per-permission results; missing entries count as denied.
        granted = True
        for perm in permissions:
            granted = granted and results.get(perm, False)
        future.set_result(granted)

    app.request_permissions(permissions).then(on_result)
    return future
def send_http_error(self, http_code, cim_error=None, cim_error_details=None,
                    headers=None):
    """Send an HTTP response back to the WBEM server that indicates
    an error at the HTTP level.
    """
    self.send_response(http_code, http_client.responses.get(http_code, ''))
    self.send_header("CIMExport", "MethodResponse")
    if cim_error is not None:
        self.send_header("CIMError", cim_error)
    if cim_error_details is not None:
        self.send_header("CIMErrorDetails", cim_error_details)
    for header, value in (headers or []):
        self.send_header(header, value)
    self.end_headers()
    self.log('%s: HTTP status %s; CIMError: %s, CIMErrorDetails: %s',
             (self._get_log_prefix(), http_code, cim_error, cim_error_details),
             logging.WARNING)
def add_wikilink(self, title, href, **attrs):
    """Add a Wiki link to the project and returns a :class:`WikiLink` object.

    :param title: title of the :class:`WikiLink`
    :param href: href of the :class:`WikiLink`
    :param attrs: optional attributes for :class:`WikiLink`
    """
    links = WikiLinks(self.requester)
    return links.create(self.id, title, href, **attrs)
def compute_aic(model_object):
    """Compute the Akaike Information Criteria for an estimated model.

    Parameters
    ----------
    model_object : an MNDC_Model (multinomial discrete choice model) instance.
        The model should have already been estimated.
        `model_object.log_likelihood` should be a number, and
        `model_object.params` should be a pandas Series.

    Returns
    -------
    aic : float.
        The AIC for the estimated model.

    Notes
    -----
    aic = -2 * log_likelihood + 2 * num_estimated_parameters

    References
    ----------
    Akaike, H. (1974). 'A new look at the statistical identification model',
    IEEE Transactions on Automatic Control 19, 6: 716-723.
    """
    assert isinstance(model_object.params, pd.Series)
    assert isinstance(model_object.log_likelihood, Number)
    num_params = model_object.params.size
    return 2 * num_params - 2 * model_object.log_likelihood
def extract(self, item, list_article_candidate):
    """Compares the extracted publish dates.

    :param item: The corresponding NewscrawlerItem
    :param list_article_candidate: A list, the list of ArticleCandidate-Objects
        which have been extracted
    :return: A string, the most likely publish date, or None if no candidate
        provided one
    """
    # Collect (publish_date, extractor) pairs from candidates that found a
    # date.  (Fixed: the original compared with `!= None` instead of the
    # idiomatic `is not None`.)
    dated = [(c.publish_date, c.extractor)
             for c in list_article_candidate
             if c.publish_date is not None]
    if not dated:
        return None
    # Prefer the dedicated date_extractor's result; otherwise fall back to
    # the first candidate that produced any date.
    for publish_date, extractor in dated:
        if extractor == "date_extractor":
            return publish_date
    return dated[0][0]
def GetLinkedFileEntry(self):
    """Retrieves the linked file entry, for example for a symbolic link.

    Returns:
      OSFileEntry: linked file entry or None if not available.
    """
    link = self._GetLink()
    if not link:
        return None
    return OSFileEntry(
        self._resolver_context, self._file_system,
        os_path_spec.OSPathSpec(location=link))
def flag_values_dict(self):
    """Returns a dictionary that maps flag names to flag values."""
    return {name: flag.value for name, flag in self._flags().items()}
def set_last_position(self, last_position):
    """Called from the manager, it is in charge of updating the last position
    of data commited by the writer, in order to have resume support.
    """
    # Falsy input (None, empty dict) starts from a fresh position; otherwise
    # the caller's dict is filled in place and stored.
    position = last_position or {}
    for key, default in (('readed_streams', []), ('stream_offset', {})):
        position.setdefault(key, default)
    self.last_position = position
def inject_nmi(self):
    """Inject NMI, Non Maskable Interrupt.

    Inject NMI (Non Maskable Interrupt) for a node immediately.

    :raises: IloError, on an error from iLO
    """
    system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
    # An NMI only makes sense on a running server.
    if system.power_state != sushy.SYSTEM_POWER_STATE_ON:
        raise exception.IloError("Server is not in powered on state.")
    try:
        system.reset_system(sushy.RESET_NMI)
    except sushy.exceptions.SushyError as e:
        msg = (self._('The Redfish controller failed to inject nmi to '
                      'server. Error %(error)s') % {'error': str(e)})
        LOG.debug(msg)
        raise exception.IloError(msg)
def fit(self, X, y, cost_mat, sample_weight=None):
    """Build a Bagging ensemble of estimators from the training set (X, y).

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape = [n_samples, n_features]
        The training input samples. Sparse matrices are accepted only if
        they are supported by the base estimator.
    y : array-like, shape = [n_samples]
        The target values (class labels in classification, real numbers in
        regression).
    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false negatives,
        true positives and true negatives, for each example.
    sample_weight : array-like, shape = [n_samples] or None
        Sample weights. If None, then samples are equally weighted.
        Note that this is supported only if the base estimator supports
        sample weighting.

    Returns
    -------
    self : object
        Returns self.
    """
    # NOTE(review): sample_weight is accepted but never used in this body —
    # confirm whether it should be forwarded to the estimators.
    random_state = check_random_state(self.random_state)
    # Convert data
    # X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])  # Not in sklearn version 0.15
    # Remap output
    n_samples, self.n_features_ = X.shape
    y = self._validate_y(y)
    # Check parameters
    self._validate_estimator()
    # max_samples / max_features may be given as absolute counts (int) or
    # as fractions of the training set (float).
    if isinstance(self.max_samples, (numbers.Integral, np.integer)):
        max_samples = self.max_samples
    else:  # float
        max_samples = int(self.max_samples * X.shape[0])
    if not (0 < max_samples <= X.shape[0]):
        raise ValueError("max_samples must be in (0, n_samples]")
    if isinstance(self.max_features, (numbers.Integral, np.integer)):
        max_features = self.max_features
    else:  # float
        max_features = int(self.max_features * self.n_features_)
    if not (0 < max_features <= self.n_features_):
        raise ValueError("max_features must be in (0, n_features]")
    # Free allocated memory, if any
    self.estimators_ = None
    # Parallel loop: partition the n_estimators across n_jobs workers, each
    # seeded independently so the ensemble is reproducible.
    n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)
    seeds = random_state.randint(MAX_INT, size=self.n_estimators)
    all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_build_estimators)(
            n_estimators[i], self, X, y, cost_mat,
            seeds[starts[i]:starts[i + 1]], verbose=self.verbose)
        for i in range(n_jobs))
    # Reduce: flatten each worker's (estimators, samples, features) triples.
    self.estimators_ = list(itertools.chain.from_iterable(t[0] for t in all_results))
    self.estimators_samples_ = list(itertools.chain.from_iterable(t[1] for t in all_results))
    self.estimators_features_ = list(itertools.chain.from_iterable(t[2] for t in all_results))
    # Out-of-bag savings drive the per-estimator weights used at prediction.
    self._evaluate_oob_savings(X, y, cost_mat)
    # Optional second-stage combiners, fitted on top of the base ensemble.
    if self.combination in ['stacking', 'stacking_proba', 'stacking_bmr', 'stacking_proba_bmr']:
        self._fit_stacking_model(X, y, cost_mat)
    if self.combination in ['majority_bmr', 'weighted_bmr', 'stacking_bmr', 'stacking_proba_bmr']:
        self._fit_bmr_model(X, y)
    return self
def find_gap(l, value):
    """try to find a address gap in the list of modbus registers"""
    # Scan the (ascending) register addresses: an exact hit means no gap;
    # the first larger address marks where `value` is missing.
    for position, register in enumerate(l):
        if register == value:
            return None
        if register > value:
            return position
    return None
def parse_marker(cls, line):
    """Returns a pair (prepend, leader) iff the line has a valid leader.

    ``prepend`` is the column at which continuation content should start and
    ``leader`` is the list-marker text captured by ``cls.pattern``.
    Returns None when the line does not match the marker pattern.
    """
    # NOTE(review): assumes cls.pattern captures the leader in group(1) and
    # the whitespace following it in group(2) — confirm against the pattern.
    match_obj = cls.pattern.match(line)
    if match_obj is None:
        # No valid leader.
        return None
    leader = match_obj.group(1)
    # Normalize the first tab right after the leader to a single space.
    content = match_obj.group(0).replace(leader + '\t', leader + ' ', 1)
    # reassign prepend and leader
    prepend = len(content)
    if prepend == len(line.rstrip('\n')):
        # Marker-only line: indent continuations just past the leader.
        prepend = match_obj.end(1) + 1
    else:
        spaces = match_obj.group(2)
        if spaces.startswith('\t'):
            spaces = spaces.replace('\t', ' ', 1)
        spaces = spaces.replace('\t', ' ')
        n_spaces = len(spaces)
        if n_spaces > 4:
            # Overly wide gap after the marker: treat content as starting
            # just past the leader instead.
            prepend = match_obj.end(1) + 1
    return prepend, leader
def send_pgroup_snapshot(self, source, **kwargs):
    """Send an existing pgroup snapshot to target(s).

    :param source: Name of pgroup snapshot to send.
    :type source: str
    :param \\*\\*kwargs: See the REST API Guide on your array for the
                       documentation on the request:
                       **POST pgroup**
    :type \\*\\*kwargs: optional
    :returns: A list of dictionaries describing the sent snapshots.
    :rtype: ResponseList

    .. note::

        Requires use of REST API 1.16 or later.
    """
    # Caller-supplied kwargs may override the base fields, matching the
    # original update() order.
    payload = dict({"name": [source], "action": "send"}, **kwargs)
    return self._request("POST", "pgroup", payload)
def committed(self, partition):
    """Get the last committed offset for the given partition.

    This offset will be used as the position for the consumer
    in the event of a failure.

    This call may block to do a remote call if the partition in question
    isn't assigned to this consumer or if the consumer hasn't yet
    initialized its cache of committed offsets.

    Arguments:
        partition (TopicPartition): The partition to check.

    Returns:
        The last committed offset, or None if there was no prior commit.
    """
    # Committed-offset APIs require Kafka >= 0.8.1 and a consumer group.
    assert self.config['api_version'] >= (0, 8, 1), 'Requires >= Kafka 0.8.1'
    assert self.config['group_id'] is not None, 'Requires group_id'
    if not isinstance(partition, TopicPartition):
        raise TypeError('partition must be a TopicPartition namedtuple')
    if self._subscription.is_assigned(partition):
        # Assigned partition: serve from the local cache, refreshing it via
        # the coordinator on a miss.
        committed = self._subscription.assignment[partition].committed
        if committed is None:
            self._coordinator.refresh_committed_offsets_if_needed()
            committed = self._subscription.assignment[partition].committed
    else:
        # Not assigned to this consumer: fetch directly from the coordinator
        # (remote call).
        commit_map = self._coordinator.fetch_committed_offsets([partition])
        if partition in commit_map:
            committed = commit_map[partition].offset
        else:
            committed = None
    return committed
def inversion(origin=(0, 0, 0)):
    """Inversion symmetry operation about axis.

    Args:
        origin (3x1 array): Origin of the inversion operation. Defaults
            to [0, 0, 0].

    Returns:
        SymmOp representing an inversion operation about the origin.
    """
    # Affine matrix for x -> 2*origin - x: negate the rotation part and
    # translate by twice the origin.
    affine = -np.eye(4)
    affine[3, 3] = 1
    affine[:3, 3] = 2 * np.array(origin)
    return SymmOp(affine)
def _lal_spectrum(timeseries, segmentlength, noverlap=None, method='welch',
                  window=None, plan=None):
    """Generate a PSD `FrequencySeries` using |lal|_

    Parameters
    ----------
    timeseries : `~gwpy.timeseries.TimeSeries`
        input `TimeSeries` data.
    segmentlength : `int`
        number of samples in single average.
    method : `str`
        average PSD method
    noverlap : `int`
        number of samples to overlap between segments, defaults to 50%.
    window : `lal.REAL8Window`, optional
        window to apply to timeseries prior to FFT
    plan : `lal.REAL8FFTPlan`, optional
        LAL FFT plan to use when generating average spectrum

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        average power `FrequencySeries`
    """
    import lal
    from ...utils.lal import find_typed_function

    # default to 50% overlap
    if noverlap is None:
        noverlap = int(segmentlength // 2)
    stride = segmentlength - noverlap

    # get window
    if window is None:
        window = generate_window(segmentlength, dtype=timeseries.dtype)

    # get FFT plan
    if plan is None:
        plan = generate_fft_plan(segmentlength, dtype=timeseries.dtype)

    method = method.lower()

    # check data length: the median-mean estimator needs an even number
    # of averaging segments, so drop one if necessary
    size = timeseries.size
    numsegs = 1 + int((size - segmentlength) / stride)
    if method == 'median-mean' and numsegs % 2:
        numsegs -= 1
        if not numsegs:
            raise ValueError("Cannot calculate median-mean spectrum with "
                             "this small a TimeSeries.")
    # truncate trailing samples that don't fit an integer number of strides
    required = int((numsegs - 1) * stride + segmentlength)
    if size != required:
        warnings.warn("Data array is the wrong size for the correct number "
                      "of averages given the input parameters. The trailing "
                      "%d samples will not be used in this calculation."
                      % (size - required))
        timeseries = timeseries[:required]

    # generate output spectrum
    # NOTE(review): deltaF is passed as ``1 / segmentlength`` (sample-count
    # units, not seconds) -- presumably consistent with how the matching
    # window/plan are built; confirm against generate_fft_plan.
    create = find_typed_function(timeseries.dtype, 'Create', 'FrequencySeries')
    lalfs = create(timeseries.name, lal.LIGOTimeGPS(timeseries.epoch.gps), 0,
                   1 / segmentlength, lal.StrainUnit,
                   int(segmentlength // 2 + 1))

    # find LAL method (e.g. median-mean -> lal.REAL8AverageSpectrumMedianMean)
    methodname = ''.join(map(str.title, re.split('[-_]', method)))
    spec_func = find_typed_function(timeseries.dtype, '',
                                    'AverageSpectrum{}'.format(methodname))

    # calculate spectrum (fills ``lalfs`` in place)
    spec_func(lalfs, timeseries.to_lal(), segmentlength, stride, window, plan)

    # format and return, rescaling the unit to a spectral density
    spec = FrequencySeries.from_lal(lalfs)
    spec.name = timeseries.name
    spec.channel = timeseries.channel
    spec.override_unit(scale_timeseries_unit(timeseries.unit,
                                             scaling='density'))
    return spec
def run_analysis(args):
    """Builds an analysis files for training."""
    # Load the schema describing the input feature types.
    raw_schema = file_io.read_file_to_string(args.schema_file)
    schema_list = json.loads(raw_schema)

    run_numerical_categorical_analysis(args, schema_list)

    # Keep a copy of the schema alongside the analysis output so the
    # training step can find it in one place.
    destination = os.path.join(args.output_dir, SCHEMA_FILE)
    file_io.copy(args.schema_file, destination, overwrite=True)
def pull(self, bookName=None, sheetName=None):
    """pull data into this OR.SHEET from a real book/sheet in Origin"""
    # Resolve the target location: explicit arguments win, then this
    # object's remembered book/sheet, then whatever is active in Origin.
    if bookName is None and self.bookName:
        bookName = self.bookName
    if sheetName is None and self.sheetName:
        sheetName = self.sheetName
    if bookName is None:
        bookName = OR.activeBook()
    if bookName and sheetName is None:
        sheetName = OR.activeSheet()
    if not bookName or not sheetName:
        print("can't figure out where to pull from! [%s]%s" % (bookName, sheetName))
        return

    # Remember where we pulled from, then snapshot the sheet's metadata
    # and data column by column.
    poSheet = OR.getSheet(bookName, sheetName)
    self.bookName = bookName
    self.sheetName = sheetName
    self.desc = poSheet.GetLongName()
    self.colNames = [col.GetName() for col in poSheet.Columns()]
    self.colDesc = [col.GetLongName() for col in poSheet.Columns()]
    self.colUnits = [col.GetUnits() for col in poSheet.Columns()]
    self.colComments = [col.GetComments() for col in poSheet.Columns()]
    self.colTypes = [col.GetType() for col in poSheet.Columns()]
    self.colData = [col.GetData() for col in poSheet.Columns()]
def get_chron_data(temp_sheet, row, total_vars):
    """Capture all data in for a specific chron data row (for csv output)

    :param obj temp_sheet: sheet object exposing ``cell_value(row, col)``
    :param int row: row index to read
    :param int total_vars: number of columns to read
    :return list: data_row
    """
    missing_markers = ('none', 'na', '', '-')
    data_row = []
    for col in range(total_vars):
        value = temp_sheet.cell_value(row, col)
        if isinstance(value, str):
            # Normalize text cells to lowercase and map known
            # missing-value markers to the literal string 'nan'.
            value = value.lower()
            if value in missing_markers:
                value = 'nan'
        data_row.append(value)
    return data_row
def apply_boundary_conditions(self, **params):
    """Applies each distributions' boundary conditions to the given list
    of parameters, returning a new list with the conditions applied.

    Parameters
    ----------
    **params :
        Keyword arguments should give the parameters to apply the
        conditions to.

    Returns
    -------
    dict
        A dictionary of the parameters after each distribution's
        `apply_boundary_conditions` function has been applied.
    """
    # Fold each distribution's corrections back into the running dict so
    # that later distributions see earlier adjustments.
    for distribution in self.distributions:
        corrected = distribution.apply_boundary_conditions(**params)
        params.update(corrected)
    return params
def save_mat(ts, filename):
    """save a Timeseries to a MATLAB .mat file

    Args:
        ts (Timeseries): the timeseries to save
        filename (str): .mat filename to save to
    """
    import scipy.io as sio
    timepoints = ts.tspan
    # Sampling rate: number of intervals divided by total duration.
    fs = (1.0 * len(timepoints) - 1) / (timepoints[-1] - timepoints[0])
    contents = {'data': np.asarray(ts),
                'fs': fs,
                'labels': ts.labels[1]}
    sio.savemat(filename, contents, do_compression=True)
def get_plugin_actions(self):
    """Return a list of actions related to plugin.

    Builds the console actions (quit, run script, environment and
    sys.path viewers) plus an options sub-menu (buffer size, line wrap,
    code completion, external editor) and returns them in menu order.
    """
    # Quit action, registered with a configurable shortcut (Ctrl+Q default).
    quit_action = create_action(self, _("&Quit"), icon=ima.icon('exit'),
                                tip=_("Quit"), triggered=self.quit)
    self.register_shortcut(quit_action, "_", "Quit", "Ctrl+Q")
    # Run an arbitrary Python script inside this console.
    run_action = create_action(self, _("&Run..."), None,
                               ima.icon('run_small'),
                               _("Run a Python script"),
                               triggered=self.run_script)
    # Show/edit environment variables of the current session.
    environ_action = create_action(self, _("Environment variables..."),
                                   icon=ima.icon('environ'),
                                   tip=_("Show and edit environment variables"
                                         " (for current session)"),
                                   triggered=self.show_env)
    # Read-only viewer for sys.path.
    syspath_action = create_action(self, _("Show sys.path contents..."),
                                   icon=ima.icon('syspath'),
                                   tip=_("Show (read-only) sys.path"),
                                   triggered=self.show_syspath)
    # Maximum number of lines kept in the console buffer.
    buffer_action = create_action(self, _("Buffer..."), None,
                                  tip=_("Set maximum line count"),
                                  triggered=self.change_max_line_count)
    # Path to the external editor executable.
    exteditor_action = create_action(self, _("External editor path..."),
                                     None, None,
                                     _("Set external editor executable path"),
                                     triggered=self.change_exteditor)
    # Toggle actions are initialized from the current config options.
    wrap_action = create_action(self, _("Wrap lines"),
                                toggled=self.toggle_wrap_mode)
    wrap_action.setChecked(self.get_option('wrap'))
    codecompletion_action = create_action(self,
                                          _("Automatic code completion"),
                                          toggled=self.toggle_codecompletion)
    codecompletion_action.setChecked(self.get_option('codecompletion/auto'))
    # Group settings actions under a single sub-menu.
    option_menu = QMenu(_('Internal console settings'), self)
    option_menu.setIcon(ima.icon('tooloptions'))
    add_actions(option_menu, (buffer_action, wrap_action,
                              codecompletion_action, exteditor_action))
    # None / MENU_SEPARATOR entries render as separators in the menu.
    plugin_actions = [None, run_action, environ_action, syspath_action,
                      option_menu, MENU_SEPARATOR, quit_action,
                      self.undock_action]
    return plugin_actions
def publish_price_feed(self, symbol, settlement_price, cer=None, mssr=110,
                       mcr=200, account=None):
    """Publish a price feed for a market-pegged asset

    :param str symbol: Symbol of the asset to publish feed for
    :param bitshares.price.Price settlement_price: Price for settlement
    :param bitshares.price.Price cer: Core exchange Rate (default
        ``settlement_price + 5%``)
    :param float mssr: Percentage for max short squeeze ratio (default:
        110%)
    :param float mcr: Percentage for maintenance collateral ratio
        (default: 200%)
    :param str account: (optional) the account to allow access
        to (defaults to ``default_account``)

    .. note:: The ``account`` needs to be allowed to produce a
        price feed for ``symbol``. For witness produced
        feeds this means ``account`` is a witness account!
    """
    # Ratios are expressed as percentages and must exceed 100%.
    assert mcr > 100
    assert mssr > 100
    assert isinstance(settlement_price, Price), \
        "settlement_price needs to be instance of `bitshares.price.Price`!"
    assert cer is None or isinstance(cer, Price), \
        "cer needs to be instance of `bitshares.price.Price`!"
    # Fall back to the configured default account when none is given.
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    asset = Asset(symbol, blockchain_instance=self, full=True)
    backing_asset = asset["bitasset_data"]["options"]["short_backing_asset"]
    # The feed price must involve the asset itself on one side ...
    assert (asset["id"] == settlement_price["base"]["asset"]["id"] or
            asset["id"] == settlement_price["quote"]["asset"]["id"]), \
        "Price needs to contain the asset of the symbol you'd like to produce a feed for!"
    assert asset.is_bitasset, "Symbol needs to be a bitasset!"
    # ... and the backing collateral asset on the other side.
    assert (settlement_price["base"]["asset"]["id"] == backing_asset or
            settlement_price["quote"]["asset"]["id"] == backing_asset), \
        "The Price needs to be relative to the backing collateral!"
    # Re-express the settlement price with ``symbol`` as base.
    settlement_price = settlement_price.as_base(symbol)
    if cer:
        cer = cer.as_base(symbol)
        if cer["quote"]["asset"]["id"] != "1.3.0":
            raise ValueError("CER must be defined against core asset '1.3.0'")
    else:
        if settlement_price["quote"]["asset"]["id"] != "1.3.0":
            raise ValueError(
                "CER must be manually provided because it relates to core asset '1.3.0'")
        # NOTE(review): default CER is derived by scaling the quoted
        # settlement price by 0.95 here, while the docstring advertises
        # "+5%" -- confirm which direction is intended.
        cer = settlement_price.as_quote(symbol) * 0.95
    op = operations.Asset_publish_feed(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "publisher": account["id"],
            "asset_id": asset["id"],
            "feed": {
                "settlement_price": settlement_price.as_base(symbol).json(),
                "core_exchange_rate": cer.as_base(symbol).json(),
                # Percent values are scaled by 10 for the operation
                # (e.g. 110 -> 1100) -- presumably the chain stores
                # ratios in these integer units; confirm against the
                # operation definition.
                "maximum_short_squeeze_ratio": int(mssr * 10),
                "maintenance_collateral_ratio": int(mcr * 10),
            },
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active")
def class_wise_accuracy(self, event_label, factor=0.5):
    """Class-wise accuracy metrics (sensitivity, specificity, accuracy, and balanced_accuracy)

    Returns
    -------
    dict
        results in a dictionary format
    """
    # Pull this label's confusion-matrix counts once.
    counts = self.class_wise[event_label]
    sens = metric.sensitivity(Ntp=counts['Ntp'], Nfn=counts['Nfn'])
    spec = metric.specificity(Ntn=counts['Ntn'], Nfp=counts['Nfp'])
    bal_acc = metric.balanced_accuracy(sensitivity=sens,
                                       specificity=spec,
                                       factor=factor)
    acc = metric.accuracy(Ntp=counts['Ntp'], Ntn=counts['Ntn'],
                          Nfp=counts['Nfp'], Nfn=counts['Nfn'])
    return {
        'accuracy': acc,
        'balanced_accuracy': bal_acc,
        'sensitivity': sens,
        'specificity': spec,
    }
def predict(self, data):
    """Predict a new data set based on an estimated model.

    Parameters
    ----------
    data : pandas.DataFrame
        Data to use for prediction. Must contain all the columns
        referenced by the right-hand side of the `model_expression`.

    Returns
    -------
    result : pandas.Series
        Predicted values as a pandas Series. Will have the index of `data`
        after applying filters.
    """
    self.assert_fitted()
    # Delegate to the module-level ``predict`` helper, logging the
    # start/finish of the prediction run.
    message = 'predicting model {}'.format(self.name)
    with log_start_finish(message, logger):
        return predict(data, self.predict_filters, self.model_fit,
                       self.ytransform)
def load(self, walkthrough):
    """Loads the XML text for a new walkthrough.

    :param walkthrough | <XWalkthrough> | <str> |
                       | <xml.etree.ElementTree.Element>
    """
    # Accept raw XML text and parse it first (Python 2: str or unicode).
    if type(walkthrough) in (str, unicode):
        walkthrough = XWalkthrough.load(walkthrough)

    # Suspend repaints while the slide list is rebuilt.
    self.setUpdatesEnabled(False)
    self.clear()
    for slide in walkthrough.slides():
        self.addSlide(slide)
    self.setUpdatesEnabled(True)
    self.updateUi()
def fill_gaps(lat, lon, sla, mask, remove_edges=False):
    """
    # FILL_GAPS
    # @summary: This function allow interpolating data in gaps, depending on gap size. Data must be regularly gridded
    # @param lat {type:numeric}: latitude
    # @param lon {type:numeric}: longitude
    # @param sla {type:numeric}: data
    # @return:
    #    outdst: resampled distance
    #    outlon: resampled longitude
    #    outlat: resampled latitude
    #    outsla: resampled data
    #    gaplen: length of the longest gap in data
    #    ngaps: number of detected gaps in data
    #    dx: average spatial sampling
    #    interpolated: True when data was interpolated (empty bin)
    # @author: Renaud DUSSURGET (RD) - LER/PAC, Ifremer
    # @change: Created by RD, July 2012
    #    29/08/2012: Major change -> number of output variables changes (added INTERPOLATED), and rebinning modified
    #    06/11/2012: Included in alti_tools lib
    # NOTE(review): the actual return tuple is (outsla, outlon, outlat,
    # outind, ngaps, gapedges, gaplen, interpolated) -- the doc above
    # mentions outdst/dx which are not returned; confirm intended API.
    """
    # Along-track distance of each sample (project helper).
    dst = calcul_distance(lat, lon)
    # Find gaps in data
    dx = dst[1:] - dst[:-1]
    mn_dx = np.median(dx)  # median spacing; NOTE(review): unused below
    nx = len(sla)
    flag = ~mask  # True where data is valid
    # Get filled bins indices
    outsla = sla.copy()
    outlon = lon.copy()
    outlat = lat.copy()
    outind = np.arange(nx)
    # Replace missing data on edges by the latest valid point
    first = np.where((flag))[0].min()
    last = np.where((flag))[0].max()
    if remove_edges:
        # Trim leading/trailing invalid samples instead of padding them.
        outsla = outsla[first:last + 1]
        outlon = outlon[first:last + 1]
        outlat = outlat[first:last + 1]
        outind = outind[first:last + 1]
        mask = mask[first:last + 1]
        flag = flag[first:last + 1]
    else:
        outsla[0:first] = outsla[first]
        outsla[last:] = outsla[last]
    # Get gap properties: hist is 1 on valid bins, 0 on gaps
    # NOTE(review): hist is sized with the ORIGINAL length nx even when
    # remove_edges shortened outsla -- confirm the masks stay aligned.
    hist = np.ones(nx, dtype=int)
    hist[outsla.mask] = 0
    # Strip leading/trailing zero bins before locating interior gaps.
    while hist[0] == 0:
        hist = np.delete(hist, [0])
    while hist[-1] == 0:
        hist = np.delete(hist, [len(hist) - 1])
    ind = np.arange(len(hist))
    # Transitions 1->0 mark gap starts, 0->1 mark gap ends.
    dhist = (hist[1:] - hist[:-1])
    st = ind.compress(dhist == -1) + 1
    en = ind.compress(dhist == 1)
    gaplen = (en - st) + 1
    ngaps = len(st)
    gapedges = np.array([st, en])
    ok = np.where(flag)[0]
    empty = np.where(mask)[0]
    # Fill the gaps if there are some
    if len(empty) > 0:
        # Interpolate lon, lat @ empty positions
        # NOTE(review): interp1d here appears to be a project helper
        # taking (x_known, y_known, x_out) -- not scipy's class; confirm.
        outsla[empty] = interp1d(ok, outsla[ok], empty)
    # Get empty bin flag
    interpolated = ~hist.astype('bool')
    return outsla, outlon, outlat, outind, ngaps, gapedges, gaplen, interpolated
def on_use_runtime_value_toggled(self, widget, path):
    """Try to set the use runtime value flag to the newly entered one"""
    try:
        # Look up the port id stored in the toggled row, then flip its flag.
        port_id = self.list_store[path][self.ID_STORAGE_ID]
        self.toggle_runtime_value_usage(port_id)
    except TypeError:
        # Row lookup failed (e.g. invalid path); log and keep the UI alive.
        logger.exception("Error while trying to change the use_runtime_value flag")
def _parse_command_line_arguments ( ) :
"""Transform vispy specific command line args to vispy config .
Put into a function so that any variables dont leak in the vispy namespace .""" | global config
# Get command line args for vispy
argnames = [ 'vispy-backend=' , 'vispy-gl-debug' , 'vispy-glir-file=' , 'vispy-log=' , 'vispy-help' , 'vispy-profile=' , 'vispy-cprofile' , 'vispy-dpi=' , 'vispy-audit-tests' ]
try :
opts , args = getopt . getopt ( sys . argv [ 1 : ] , '' , argnames )
except getopt . GetoptError :
opts = [ ]
# Use them to set the config values
for o , a in opts :
if o . startswith ( '--vispy' ) :
if o == '--vispy-backend' :
config [ 'default_backend' ] = a
logger . info ( 'vispy backend: %s' , a )
elif o == '--vispy-gl-debug' :
config [ 'gl_debug' ] = True
elif o == '--vispy-glir-file' :
config [ 'glir_file' ] = a
elif o == '--vispy-log' :
if ',' in a :
verbose , match = a . split ( ',' )
else :
verbose = a
match = None
config [ 'logging_level' ] = a
set_log_level ( verbose , match )
elif o == '--vispy-profile' :
config [ 'profile' ] = a
elif o == '--vispy-cprofile' :
_enable_profiling ( )
elif o == '--vispy-help' :
print ( VISPY_HELP )
elif o == '--vispy-dpi' :
config [ 'dpi' ] = int ( a )
elif o == '--vispy-audit-tests' :
config [ 'audit_tests' ] = True
else :
logger . warning ( "Unsupported vispy flag: %s" % o ) |
def _RawGlobPathSpecWithNumericSchema(
    file_system, parent_path_spec, segment_format, location, segment_number):
    """Globs for path specifications according to a numeric naming schema.

    Args:
        file_system (FileSystem): file system.
        parent_path_spec (PathSpec): parent path specification.
        segment_format (str): naming schema of the segment file location.
        location (str): the base segment file location string.
        segment_number (int): first segment number.

    Returns:
        list[PathSpec]: path specifications that match the glob.
    """
    matches = []
    while True:
        candidate_location = segment_format.format(location, segment_number)
        # Only include keyword arguments that are actually used: the path
        # specification base class raises on unused keyword arguments.
        properties = path_spec_factory.Factory.GetProperties(parent_path_spec)
        properties['location'] = candidate_location
        if parent_path_spec.parent is not None:
            properties['parent'] = parent_path_spec.parent
        candidate = path_spec_factory.Factory.NewPathSpec(
            parent_path_spec.type_indicator, **properties)
        # Stop at the first segment number that does not exist.
        if not file_system.FileEntryExistsByPathSpec(candidate):
            break
        matches.append(candidate)
        segment_number += 1
    return matches
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.