signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def to_pandas(self):
    """Convert this multi-index to a pandas ``MultiIndex``.

    Returns:
        pandas.MultiIndex built from the raw values of each level,
        carrying over ``self.names``.

    Raises:
        ValueError: if any underlying index has not been evaluated yet.
    """
    unevaluated = [ind for ind in self.values if not ind.is_raw()]
    if unevaluated:
        raise ValueError('Cannot convert to pandas MultiIndex if not evaluated.')
    from pandas import MultiIndex as PandasMultiIndex
    level_arrays = [ind.values for ind in self.values]
    return PandasMultiIndex.from_arrays(level_arrays, names=self.names)
def mapToLineCol(self, absPosition):
    """Convert an absolute document position to a ``(line, column)`` pair.

    Raises:
        IndexError: if ``absPosition`` does not lie inside the document.
    """
    textBlock = self.document().findBlock(absPosition)
    if not textBlock.isValid():
        raise IndexError("Invalid absolute position %d" % absPosition)
    line = textBlock.blockNumber()
    column = absPosition - textBlock.position()
    return (line, column)
def createRatModuleFromReadoutResolution(inverseReadoutResolution, scale,
                                         enlargeModuleFactor=1.,
                                         fixedScale=False, **kwargs):
    """Create a ThresholdedGaussian2DLocationModule from a readout resolution.

    @param inverseReadoutResolution (int or float)
    Equivalent to 1/readoutResolution, specified this way as a convenience
    (it's easier and less ambiguous to type 3 than 0.33333). The readout
    resolution specifies the diameter of the circle of phases in the rhombus
    encoded by a bump, so the circle of active cells decoded from a bump has
    at least this diameter.

    @param enlargeModuleFactor (float)
    Multiplicative factor simulating a larger module with a fixed bump size:
    it shrinks the bump relative to the module, increases readout precision,
    adds more cells, and (unless fixedScale) increases the scale so the bump
    covers the same physical area.

    @param fixedScale (bool)
    By default enlargeModuleFactor also scales up ``scale``, holding the bump
    size constant relative to physical space. Set True to hold the scale
    constant instead, so enlarging the module shrinks the bump relative to
    physical space.
    """
    params = computeRatModuleParametersFromReadoutResolution(
        inverseReadoutResolution, enlargeModuleFactor)
    params.update(kwargs)
    if fixedScale:
        params["scale"] = scale
    else:
        params["scale"] = scale * enlargeModuleFactor
    return ThresholdedGaussian2DLocationModule(**params)
def _make_key(relation):
    """Build a _ReferenceKey with lowercased parts for the cache, so we
    don't have to keep track of quoting."""
    database = _lower(relation.database)
    schema = _lower(relation.schema)
    identifier = _lower(relation.identifier)
    return _ReferenceKey(database, schema, identifier)
def poll(self):
    """Check whether the operation has finished.

    :rtype: bool
    :returns: True if the current operation has completed.
    :raises ValueError: if the operation has already completed.
    """
    if self.complete:
        raise ValueError("The operation has completed.")
    self._update_state(self._get_operation())
    return self.complete
def create_tar_archive(self):
    """Create a tar archive of the main simulation outputs.

    Only MPI rank 0 writes ``<savefolder>.tar``; all ranks synchronize on a
    barrier afterwards.  Per-cell output, population subsamples and raw NEST
    output are excluded from the archive.
    """
    # Paths that must NOT end up in the archive.
    EXCLUDE_FILES = glob.glob(os.path.join(self.savefolder, 'cells'))
    EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'populations', 'subsamples'))
    EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'raw_nest_output'))

    def filter_function(tarinfo):
        # tarfile member filter: returning None drops the member, returning
        # the TarInfo keeps it.  A member is dropped when its basename occurs
        # as a substring of an excluded path's basename, tested against both
        # tarinfo.name and tarinfo.path.
        # NOTE(review): substring matching may exclude more members than
        # intended, and TarInfo has no documented ``path`` attribute — confirm.
        print(tarinfo.name)
        if len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.name)[-1] in os.path.split(f)[-1]]) > 0 or len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.path)[-1] in os.path.split(f)[-1]]) > 0:
            print('excluding %s' % tarinfo.name)
            return None
        else:
            return tarinfo

    if RANK == 0:
        # only the master rank writes the archive
        print('creating archive %s' % (self.savefolder + '.tar'))
        # open file
        f = tarfile.open(self.savefolder + '.tar', 'w')
        # avoid adding files to repo as /scratch/$USER/hybrid_model/...
        arcname = os.path.split(self.savefolder)[-1]
        f.add(name=self.savefolder, arcname=arcname, filter=filter_function)
        f.close()
    # resync all MPI ranks once the archive exists
    COMM.Barrier()
def formatBodyNode(root, path):
    """Promote ``root`` to the body node (mutates and returns the same object)."""
    node = root
    node.name = "body"
    # weight is computed after renaming but before path/pclass are set
    node.weight = calcFnWeight(node)
    node.path = path
    node.pclass = None
    return node
def add_route(self, handle_cls, path, name, category, handle_params=None,
              is_menu=False, order=None, is_open=True, oem=False, **kwargs):
    """Register a permission route and its request handler.

    Args:
        handle_cls: handler class registered for ``path``.
        path: permission URL path; falsy paths are ignored.
        name: permission display name.
        category: permission category (menu grouping).
        handle_params: extra parameters passed to the handler (default: {}).
        is_menu: whether to show this route as a sidebar menu entry.
        order: sort key; defaults to the registration time.
        is_open: whether the permission is open for authorization.
        oem: whether this is a customized (OEM) feature; an existing OEM
            route is never overwritten by a later registration.
    """
    if not path:
        return
    # Fix: the original used a shared mutable default ({}) for handle_params
    # and evaluated time.time() once at def time for order; both are now
    # None sentinels resolved per call.
    if handle_params is None:
        handle_params = {}
    if order is None:
        order = time.time()
    if path in self.routes and self.routes[path].get('oem'):
        # an existing OEM registration takes precedence; keep it
        return
    self.routes[path] = dict(
        path=path,          # permission URL path
        name=name,          # permission name
        category=category,  # permission category
        is_menu=is_menu,    # show as a sidebar menu entry
        oprs=[],            # associated operators
        order=order,        # sort order
        is_open=is_open,    # open for authorization
        oem=oem,            # customized (OEM) feature
    )
    self.routes[path].update(**kwargs)
    self.add_handler(handle_cls, path, handle_params)
def keys(self, key=None, reverse=False):
    """Return the dictionary keys as a list, sorted by ``key``/``reverse``."""
    return sorted(dict.keys(self), key=key, reverse=reverse)
def sort_list_by_index_list(x: List[Any], indexes: List[int]) -> None:
    """Re-order ``x`` in place so that element ``i`` becomes ``x[indexes[i]]``.

    Example:

    .. code-block:: python

        z = ["a", "b", "c", "d", "e"]
        sort_list_by_index_list(z, [4, 0, 1, 2, 3])
        z  # ["e", "a", "b", "c", "d"]
    """
    reordered = [x[i] for i in indexes]
    x[:] = reordered
def pair(address, key):
    '''Pair the bluetooth adapter with a device.

    CLI Example:

    .. code-block:: bash

        salt '*' bluetooth.pair DE:AD:BE:EF:CA:FE 1234

    Where DE:AD:BE:EF:CA:FE is the address of the device to pair with, and
    1234 is the passphrase.

    TODO: This function is currently broken, as the bluez-simple-agent
    program no longer ships with BlueZ >= 5.0. It needs to be refactored.
    '''
    # validate the Bluetooth device (BD) address format first
    if not salt.utils.validate.net.mac(address):
        raise CommandExecutionError('Invalid BD address passed to bluetooth.pair')
    try:
        # the passphrase must be numeric
        int(key)
    except Exception:
        raise CommandExecutionError('bluetooth.pair requires a numerical key to be used')
    # NOTE(review): address_() is called without arguments and its 'device'
    # entry is used as the local adapter — confirm this helper returns the
    # intended adapter info.
    addy = address_()
    # feed the passphrase to bluez-simple-agent via echo (shell pipeline)
    cmd = 'echo {0} | bluez-simple-agent {1} {2}'.format(_cmd_quote(addy['device']), _cmd_quote(address), _cmd_quote(key))
    out = __salt__['cmd.run'](cmd, python_shell=True).splitlines()
    return out
def _calc_colour_hist ( img ) :
"""calculate colour histogram for each region
the size of output histogram will be BINS * COLOUR _ CHANNELS ( 3)
number of bins is 25 as same as [ uijlings _ ijcv2013 _ draft . pdf ]
extract HSV""" | BINS = 25
hist = numpy . array ( [ ] )
for colour_channel in ( 0 , 1 , 2 ) : # extracting one colour channel
c = img [ : , colour_channel ]
# calculate histogram for each colour and join to the result
hist = numpy . concatenate ( [ hist ] + [ numpy . histogram ( c , BINS , ( 0.0 , 255.0 ) ) [ 0 ] ] )
# L1 normalize
hist = hist / len ( img )
return hist |
def _Rzderiv ( self , R , z , phi = 0. , t = 0. ) :
"""NAME :
_ Rzderiv
PURPOSE :
evaluate the mixed R , z derivative for this potential
INPUT :
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT :
d2phi / dR / dz
HISTORY :
2015-02-07 - Written - Bovy ( IAS )""" | return self . _mn3 [ 0 ] . Rzderiv ( R , z , phi = phi , t = t ) + self . _mn3 [ 1 ] . Rzderiv ( R , z , phi = phi , t = t ) + self . _mn3 [ 2 ] . Rzderiv ( R , z , phi = phi , t = t ) |
def iterrepos(self):
    """A generator that yields a ``(repo, [items])`` tuple for each
    non-empty repo.

    Repos whose item list is empty are skipped.
    """
    # Fix: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (which the rest of this codebase targets); use items().
    for repo, items in self.repo_items_hash.items():
        if items:
            yield (repo, items)
def update(self):
    """Updates an instance within a project.

    For example:

    .. literalinclude:: snippets.py
        :start-after: [START bigtable_update_instance]
        :end-before: [END bigtable_update_instance]

    .. note::

        Updates any or all of the following values: ``display_name``,
        ``type``, ``labels``.  To change a value before updating, assign it
        first, e.g. ``instance.display_name = 'New display name'``, then
        call :meth:`update`.

    :rtype: :class:`~google.api_core.operation.Operation`
    :returns: The long-running operation corresponding to the update
              operation.
    """
    update_mask_pb = field_mask_pb2.FieldMask()
    # only fields that are currently set go into the update mask
    mask_fields = (
        ("display_name", self.display_name),
        ("type", self.type_),
        ("labels", self.labels),
    )
    for field_name, value in mask_fields:
        if value is not None:
            update_mask_pb.paths.append(field_name)
    instance_pb = instance_pb2.Instance(
        name=self.name,
        display_name=self.display_name,
        type=self.type_,
        labels=self.labels,
    )
    return self._client.instance_admin_client.partial_update_instance(
        instance=instance_pb, update_mask=update_mask_pb)
def _get_obj_attr ( cls , obj , path , pos ) :
"""Resolve one kwargsql expression for a given object and returns
its result .
: param obj : the object to evaluate
: param path : the list of all kwargsql expression , including those
previously evaluated .
: param int pos : provides index of the expression to evaluate in the
` path ` parameter .""" | field = path [ pos ]
if isinstance ( obj , ( dict , Mapping ) ) :
return obj [ field ] , pos
elif isinstance ( obj , ( list , Sequence ) ) :
join_operation = cls . SEQUENCE_OPERATIONS . get ( field )
if join_operation is not None :
return ( AnySequenceResult ( cls . _sequence_map ( obj , path [ pos + 1 : ] ) , join_operation ) , len ( path ) + 1 , )
else :
return obj [ int ( field ) ] , pos
else :
return getattr ( obj , field , None ) , pos |
def print_dedicated_access(access):
    """Build a table of the dedicated hosts a user can access."""
    table = formatting.Table(['id', 'Name', 'Cpus', 'Memory', 'Disk', 'Created'],
                             'Dedicated Access')
    for host in access:
        table.add_row([
            host.get('id'),
            host.get('name'),
            host.get('cpuCount'),
            host.get('memoryCapacity'),
            host.get('diskCapacity'),
            host.get('createDate'),
        ])
    return table
def codes_get_string_array(handle, key, size, length=None):
    # type: (cffi.FFI.CData, bytes, int, int) -> T.List[bytes]
    """Get string array values from a key.

    :param bytes key: the keyword whose value(s) are to be extracted
    :rtype: T.List[bytes]
    """
    if length is None:
        length = codes_get_string_length(handle, key)
    # one C buffer per expected string; the Python list keeps the buffers
    # alive while the char*[] array references them
    buffers = [ffi.new('char[]', length) for _ in range(size)]
    values = ffi.new('char*[]', buffers)
    size_p = ffi.new('size_t *', size)
    _codes_get_string_array(handle, key.encode(ENC), values, size_p)
    result = []
    for i in range(size_p[0]):
        result.append(ffi.string(values[i]).decode(ENC))
    return result
def to_mongo(qry):
    """Transform a simple query with one or more filter expressions into a
    MongoDB query expression.

    :param qry: Filter expression(s), see below for details.
    :type qry: str or list
    :return: MongoDB query
    :rtype: dict
    :raises: BadExpression, if one of the input expressions cannot be parsed

    Expressions have three parts, in order ``field``, ``operator``, ``value``:

    - `field` is the name of a field in a MongoDB document
    - `value` is the value to compare against: numeric; string (you MUST use
      'single' or "double" quotes); boolean (true, false)
    - `operator` is a comparison operator: inequalities (>, <, =, <=, >=, !=);
      PCRE regular expression (~); data type (int, float, string, bool);
      exists (boolean, whether the field exists in the record); size (for
      array fields, an inequality for the array size given as a suffix:
      size>, size<)

    Multiple expressions can be a single string or a list; either way the
    form is a "disjunction of conjunctions".  In the string form, "and"
    joins expressions into groups and "or" joins groups; parentheses around
    groups (even non-sensical ones like '(((') are ignored, so they can be
    used for clarity.  In the list form, the inner lists are "and"ed groups
    and the outer list "or"s the groups together.

    **Examples**

    >>> to_mongo('(a > 3 and b = "hello") or (c > 1 and d = "goodbye")')
    {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]}
    >>> to_mongo('a > 3 and b = "hello" or c > 1 and d = "goodbye"')
    {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]}
    >>> to_mongo([['a > 3', 'b = "hello"'], ['c > 1', 'd = "goodbye"']])
    {'$or': [{'a': {'$gt': 3}, 'b': 'hello'}, {'c': {'$gt': 1}, 'd': 'goodbye'}]}
    """
    rev = False
    # filters, not constraints
    # special case for empty string/list: no filtering at all
    if qry == "" or qry == []:
        return {}
    # break input into groups of filters; unpar strips surrounding
    # whitespace and (possibly unbalanced) parentheses
    unpar = lambda s: s.strip().strip('()')
    if isinstance(qry, str):
        groups = []
        if _TOK_OR in qry:
            # split on "or" first, then each group on "and"
            groups = [unpar(g).split(_TOK_AND) for g in qry.split(_TOK_OR)]
        else:
            groups = [unpar(qry).split(_TOK_AND)]
    else:
        # list form: list-of-lists is already grouped, flat list is one group
        if isinstance(qry[0], list) or isinstance(qry[0], tuple):
            groups = qry
        else:
            groups = [qry]
    # generate a MongoDB query for each filter group
    filters = []
    for filter_exprs in groups:
        mq = MongoQuery()
        for e in filter_exprs:
            try:
                e = unpar(e)
            except AttributeError:
                # non-string entries cannot be stripped
                raise BadExpression(e, "expected string, got '{t}'".format(t=type(e)))
            try:
                constraint = Constraint(*parse_expr(e))
            except ValueError as err:
                raise BadExpression(e, err)
            clause = MongoClause(constraint, rev=rev)
            mq.add_clause(clause)
        filters.append(mq.to_mongo(rev))
    # combine the filters with $or, or strip down the single filter
    if len(filters) > 1:
        result = {'$or': filters}
    else:
        result = filters[0]
    return result
def _get_resize_options ( self , dimensions ) :
""": param dimensions :
A tuple of ( width , height , force _ size ) .
' force _ size ' can be left off and will default to False .""" | if dimensions and isinstance ( dimensions , ( tuple , list ) ) :
if len ( dimensions ) < 3 :
dimensions = tuple ( dimensions ) + ( False , )
return dimensions |
def undefinedImageType(self):
    """Returns the name of the undefined image type for the invalid image.

    The value is fetched from the JVM once and then cached on this object.

    .. versionadded:: 2.3.0
    """
    if self._undefinedImageType is None:
        # lazily resolve via the active JVM gateway and cache the result
        jvm = SparkContext._active_spark_context._jvm
        self._undefinedImageType = jvm.org.apache.spark.ml.image.ImageSchema.undefinedImageType()
    return self._undefinedImageType
def uninstall_handle_input(self):
    """Remove the Windows low-level input hook, if one is installed."""
    hook = self.hooked
    if hook is None:
        return
    ctypes.windll.user32.UnhookWindowsHookEx(hook)
    self.hooked = None
def tostype(self, stype):
    """Return a copy of the array with chosen storage type.

    Returns
    -------
    NDArray or RowSparseNDArray
        A copy of the array with the chosen storage stype.

    Raises
    ------
    ValueError
        If ``stype`` is ``'csr'`` (row_sparse -> csr is unsupported).
    """
    # pylint: disable=no-member, protected-access
    if stype != 'csr':
        return op.cast_storage(self, stype=stype)
    raise ValueError("cast_storage from row_sparse to csr is not supported")
def calculate(self, variable_name, period, **parameters):
    """Calculate the variable ``variable_name`` for the period ``period``,
    using the variable formula if it exists.

    :returns: A numpy array containing the result of the calculation
    """
    population = self.get_variable_population(variable_name)
    holder = population.get_holder(variable_name)
    variable = self.tax_benefit_system.get_variable(variable_name, check_existence=True)
    # normalize raw period inputs (e.g. strings) into Period instances
    if period is not None and not isinstance(period, periods.Period):
        period = periods.period(period)
    if self.trace:
        self.tracer.record_calculation_start(variable.name, period, **parameters)
    self._check_period_consistency(period, variable)
    # First look for a value already cached
    cached_array = holder.get_array(period)
    if cached_array is not None:
        if self.trace:
            self.tracer.record_calculation_end(variable.name, period, cached_array, **parameters)
        return cached_array
    array = None
    # First, try to run a formula
    try:
        self._check_for_cycle(variable, period)
        array = self._run_formula(variable, population, period)
        # If no result, use the default value and cache it
        if array is None:
            array = holder.default_array()
        array = self._cast_formula_result(array, variable)
        holder.put_in_cache(array, period)
    except SpiralError:
        # a calculation spiral (cycle) was detected; fall back to the
        # default value without caching a formula result
        array = holder.default_array()
    finally:
        # always close the trace record and clean up cycle-detection state,
        # whether the formula succeeded, spiralled, or raised
        if self.trace:
            self.tracer.record_calculation_end(variable.name, period, array, **parameters)
        self._clean_cycle_detection_data(variable.name)
        self.purge_cache_of_invalid_values()
    return array
def gaussian_overlapping_coefficient(means_0, stds_0, means_1, stds_1,
                                     lower=None, upper=None):
    r"""Compute the overlapping coefficient of two Gaussian continuous_distributions.

    This computes :math:`\int_{-\infty}^{\infty}{\min(f(x), g(x)) \partial x}`
    where :math:`f \sim \mathcal{N}(\mu_0, \sigma_0^{2})` and
    :math:`g \sim \mathcal{N}(\mu_1, \sigma_1^{2})` are normally distributed
    variables.  The overlap is computed for each element in the first
    dimension.

    Args:
        means_0 (ndarray): the set of means of the first distribution
        stds_0 (ndarray): the set of stds of the first distribution
        means_1 (ndarray): the set of means of the second distribution
        stds_1 (ndarray): the set of stds of the second distribution
        lower (float): the lower limit of the integration; -inf if not set
        upper (float): the upper limit of the integration; +inf if not set
    """
    lower = -np.inf if lower is None else lower
    upper = np.inf if upper is None else upper

    def point_iterator():
        # yield scalar (mean0, std0, mean1, std1) tuples, one per element
        for ind in range(means_0.shape[0]):
            yield (np.squeeze(means_0[ind]), np.squeeze(stds_0[ind]),
                   np.squeeze(means_1[ind]), np.squeeze(stds_1[ind]))

    worker = _ComputeGaussianOverlap(lower, upper)
    return np.array(list(multiprocess_mapping(worker, point_iterator())))
def _verify_create_args ( module_name , class_name , static ) :
"""Verifies a subset of the arguments to create ( )""" | # Verify module name is provided
if module_name is None :
raise InvalidServiceConfiguration ( 'Service configurations must define a module' )
# Non - static services must define a class
if not static and class_name is None :
tmpl0 = 'Non-static service configurations must define a class: '
tmpl1 = 'module is %s'
raise InvalidServiceConfiguration ( ( tmpl0 + tmpl1 ) % module_name ) |
def event_list_tabs(counts, current_kind, page_number=1):
    """Build the context for the tabs linking to the event_list pages.

    ``counts`` is a dict of the number of events for each kind, like
    ``{'all': 30, 'gig': 12, 'movie': 18}``.  ``current_kind`` is the event
    kind that's active, if any (e.g. 'gig', 'movie').  ``page_number`` is
    the current page of this kind of events.
    """
    context = {
        'counts': counts,
        'current_kind': current_kind,
        'page_number': page_number,
        # every kind we might show a tab for, like ['gig', 'movie', 'play', ...]
        'event_kinds': Event.get_kinds(),
        # per-kind data ('name', 'name_plural', 'slug'), keyed by kind ('gig')
        'event_kinds_data': Event.get_kinds_data(),
    }
    return context
def update(self, filename=None, batch_id=None, prev_batch_id=None,
           producer=None, count=None):
    """Create (or fetch) a history model instance for a transaction batch.

    :raises BatchHistoryError: if any required argument is missing.
    :raises IntegrityError: if ``batch_id`` already exists.
    """
    # TODO: refactor model enforce unique batch_id
    # TODO: refactor model to not allow NULLs
    required = (
        (filename, "Invalid filename. Got None"),
        (batch_id, "Invalid batch_id. Got None"),
        (prev_batch_id, "Invalid prev_batch_id. Got None"),
        (producer, "Invalid producer. Got None"),
    )
    for value, message in required:
        if not value:
            raise BatchHistoryError(message)
    if self.exists(batch_id=batch_id):
        raise IntegrityError("Duplicate batch_id")
    try:
        obj = self.model.objects.get(batch_id=batch_id)
    except self.model.DoesNotExist:
        obj = self.model(filename=filename, batch_id=batch_id,
                         prev_batch_id=prev_batch_id, producer=producer,
                         total=count)
    obj.transaction_file.name = filename
    obj.save()
    return obj
def locateChild(self, ctx, segments):
    """Retrieve a L{SharingIndex} for a particular user, or rend.NotFound."""
    username = segments[0].decode('utf-8')
    store = _storeFromUsername(self.loginSystem.store, username)
    if store is None:
        return rend.NotFound
    return (SharingIndex(store, self.webViewer), segments[1:])
def block_events(self):
    """Prevent the wrapped widget from sending signals or repainting."""
    widget = self._widget
    widget.blockSignals(True)
    widget.setUpdatesEnabled(False)
def register_domain(self, domain=0, tokenizer=None, trie=None):
    """Register a domain with the intent engine.

    Args:
        domain (str): a string representing the domain you wish to add
        tokenizer (tokenizer): the tokenizer you wish to use
        trie (Trie): the Trie() you wish to use
    """
    engine = IntentDeterminationEngine(tokenizer=tokenizer, trie=trie)
    self.domains[domain] = engine
def find_element_by_id(self, id_, update=False) -> Elements:
    '''Find an element by id.

    Args:
        id_: The id of the element to be found.
        update: Set True if the interface has changed since the last lookup.

    Returns:
        The element if it was found.

    Raises:
        NoSuchElementException - If the element wasn't found.

    Usage:
        element = driver.find_element_by_id('foo')
    '''
    # delegate to the generic finder with the ID strategy
    return self.find_element(by=By.ID, value=id_, update=update)
def dumps(self, value):
    '''Serialize ``value`` by threading it through every serializer in order.'''
    result = value
    for serializer in self:
        result = serializer.dumps(result)
    return result
def _bind_target(self, target, ctx=None):
    """Method to override in order to specialize binding of target.

    :param target: target to bind.
    :param ctx: target ctx.
    :return: bound target.
    :raises TypeError: if ``target`` is not hashable.
    """
    result = target
    try:  # get annotations from target if exists.
        local_annotations = get_local_property(target, Annotation.__ANNOTATIONS_KEY__, [], ctx=ctx)
    except TypeError:
        # get_local_property requires a hashable target
        raise TypeError('target {0} must be hashable.'.format(target))
    # if local_annotations do not exist, put them in target; note the same
    # list object is stored and then mutated by the insert below
    if not local_annotations:
        put_properties(target, properties={Annotation.__ANNOTATIONS_KEY__: local_annotations}, ctx=ctx)
    # insert self at first position (most recent annotation comes first)
    local_annotations.insert(0, self)
    # add target to self targets, avoiding duplicates
    if target not in self.targets:
        self.targets.append(target)
    return result
def get_gallery_all(self, username='', offset=0, limit=10):
    """Get all of a user's deviations.

    :param username: The user to query (required).
    :param offset: the pagination offset
    :param limit: the pagination limit
    :raises DeviantartError: if no username is given.
    """
    if not username:
        raise DeviantartError('No username defined.')
    response = self._req('/gallery/all',
                         {'username': username, 'offset': offset, 'limit': limit})
    deviations = []
    for item in response['results']:
        deviation = Deviation()
        deviation.from_dict(item)
        deviations.append(deviation)
    name = response['name'] if "name" in response else None
    return {
        "results": deviations,
        "name": name,
        "has_more": response['has_more'],
        "next_offset": response['next_offset'],
    }
def read_slice(self, firstrow, lastrow, step=1, **keys):
    """Read the specified row slice from a table.

    Read all rows between firstrow and lastrow (non-inclusive, as per
    python slice notation).  Note you must use slice notation for images,
    e.g. f[ext][20:30, 40:50].

    parameters
    ----------
    firstrow: integer
        The first row to read.
    lastrow: integer
        The last row to read, non-inclusive.  This follows the python list
        slice convention that one does not include the last element.
    step: integer, optional
        Step between rows, default 1.  E.g., if step is 2, skip every other
        row.
    vstorage: string, optional
        Over-ride the default method to store variable length columns.  Can
        be 'fixed' or 'object'.  See docs on fitsio.FITS for details.
    lower: bool, optional
        If True, force all column names to lower case in output.  Will
        over-ride the lower= keyword from construction.
    upper: bool, optional
        If True, force all column names to upper case in output.  Will
        over-ride the lower= keyword from construction.
    """
    if self._info['hdutype'] == ASCII_TBL:
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        keys['rows'] = rows
        return self.read_ascii(**keys)
    # Fix: the `step` parameter was previously overwritten here by
    # `step = keys.get('step', 1)`, silently discarding a positionally
    # passed step for binary tables; the parameter is now honored.
    if self._info['hdutype'] == IMAGE_HDU:
        raise ValueError("slices currently only supported for tables")
    maxrow = self._info['nrows']
    if firstrow < 0 or lastrow > maxrow:
        raise ValueError("slice must specify a sub-range of [%d,%d]" % (0, maxrow))
    dtype, offsets, isvar = self.get_rec_dtype(**keys)
    w, = numpy.where(isvar == True)  # noqa
    if w.size > 0:
        # variable-length columns present: read via the var-column path
        vstorage = keys.get('vstorage', self._vstorage)
        rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
        colnums = self._extract_colnums()
        array = self._read_rec_with_var(colnums, rows, dtype, offsets, isvar, vstorage)
    else:
        if step != 1:
            # strided read: delegate to the explicit-rows reader
            rows = numpy.arange(firstrow, lastrow, step, dtype='i8')
            array = self.read(rows=rows)
        else:
            # no +1 because lastrow is non-inclusive
            nrows = lastrow - firstrow
            array = numpy.zeros(nrows, dtype=dtype)
            # only first needs to be +1.  This is because the c code is
            # inclusive
            self._FITS.read_as_rec(self._ext + 1, firstrow + 1, lastrow, array)
            array = self._maybe_decode_fits_ascii_strings_to_unicode_py3(array)
            for colnum, name in enumerate(array.dtype.names):
                self._rescale_and_convert_field_inplace(
                    array, name,
                    self._info['colinfo'][colnum]['tscale'],
                    self._info['colinfo'][colnum]['tzero'])
    lower = keys.get('lower', False)
    upper = keys.get('upper', False)
    if self.lower or lower:
        _names_to_lower_if_recarray(array)
    elif self.upper or upper:
        _names_to_upper_if_recarray(array)
    self._maybe_trim_strings(array, **keys)
    return array
def merge_groups(adata, key, map_groups, key_added=None, map_colors=None):
    """Merge the categories of ``adata.obs[key]`` according to ``map_groups``.

    Writes the merged categorical annotation to ``adata.obs[key_added]`` and
    derives colors for the new groups in ``adata.uns``.

    Parameters
    ----------
    map_groups : `dict`
        Maps each old group name to its new (merged) group name.
    key_added
        Column to write the merged annotation to; defaults to ``key + '_merged'``.
    map_colors : `dict`
        Dict with color specification for new groups that have no
        corresponding old group.
    """
    if key_added is None:
        key_added = key + '_merged'
    adata.obs[key_added] = adata.obs[key].map(map_groups).astype(CategoricalDtype())
    old_categories = adata.obs[key].cat.categories
    new_categories = adata.obs[key_added].cat.categories
    # map_colors is passed: explicit colors win, then same-named old groups
    if map_colors is not None:
        old_colors = None
        if key + '_colors' in adata.uns:
            old_colors = adata.uns[key + '_colors']
        new_colors = []
        for group in adata.obs[key_added].cat.categories:
            if group in map_colors:
                new_colors.append(map_colors[group])
            elif group in old_categories and old_colors is not None:
                # reuse the color of the identically-named old group
                new_colors.append(old_colors[old_categories.get_loc(group)])
            else:
                raise ValueError('You didn\'t specify a color for {}.'.format(group))
        adata.uns[key_added + '_colors'] = new_colors
    # map_colors is not passed: inherit colors from the old groups
    elif key + '_colors' in adata.uns:
        old_colors = adata.uns[key + '_colors']
        inverse_map_groups = {g: [] for g in new_categories}
        for old_group in old_categories:
            inverse_map_groups[map_groups[old_group]].append(old_group)
        new_colors = []
        for group in new_categories:
            # take the color of the largest of the merged old groups
            old_group = adata.obs[key][adata.obs[key].isin(inverse_map_groups[group])].value_counts().index[0]
            new_colors.append(old_colors[old_categories.get_loc(old_group)])
        adata.uns[key_added + '_colors'] = new_colors
def get_filename(self, instance):
    """Return the stored filename, or synthesize one from the field name
    plus the extension guessed from the content type."""
    stored = self.field.getFilename(instance)
    if stored:
        return stored
    fieldname = self.get_field_name()
    content_type = self.get_content_type(instance)
    return fieldname + mimetypes.guess_extension(content_type)
def _get_start_end ( parts , index = 7 ) :
"""Retrieve start and end for a VCF record , skips BNDs without END coords""" | start = parts [ 1 ]
end = [ x . split ( "=" ) [ - 1 ] for x in parts [ index ] . split ( ";" ) if x . startswith ( "END=" ) ]
if end :
end = end [ 0 ]
return start , end
return None , None |
def date_fromnow(self, value):
    """Display a humanized date ("time since"), localized to the current
    language when it is not English."""
    import humanize
    lang = self.get_language()
    if lang != 'en':
        humanize.i18n.activate(lang)
    return Markup(humanize.naturaltime(value))
def forward(inputs_i, Q, G, A, b, h, U_Q, U_S, R, verbose=False):
    """Solve one QP with a primal-dual interior point iteration.

    Pre-conditions from the caller:
        b = A z_0
        h = G z_0 + s_0
        U_Q, U_S, R = pre_factor_kkt(Q, G, A, nineq, neq)

    Returns the primal solution ``x``, the equality multipliers ``y`` and
    the inequality multipliers ``z``.
    """
    nineq, nz, neq, _ = get_sizes(G, A)
    # find initial values by solving the KKT system with unit scaling
    d = torch.ones(nineq).type_as(Q)
    nb = -b if b is not None else None
    factor_kkt(U_S, R, d)
    x, s, z, y = solve_kkt(U_Q, d, G, A, U_S, inputs_i,
                           torch.zeros(nineq).type_as(Q), -h, nb)
    # x1, s1, z1, y1 = factor_solve_kkt(Q, torch.eye(nineq).type_as(Q), G, A, inputs_i,
    #                                   torch.zeros(nineq).type_as(Q), -h, nb)
    # shift slacks/duals so the starting point is strictly positive
    if torch.min(s) < 0:
        s -= torch.min(s) - 1
    if torch.min(z) < 0:
        z -= torch.min(z) - 1
    prev_resid = None
    for i in range(20):
        # affine scaling direction: current KKT residuals
        rx = (torch.mv(A.t(), y) if neq > 0 else 0.) + torch.mv(G.t(), z) + torch.mv(Q, x) + inputs_i
        rs = z
        rz = torch.mv(G, x) + s - h
        ry = torch.mv(A, x) - b if neq > 0 else torch.Tensor([0.])
        mu = torch.dot(s, z) / nineq  # duality gap measure
        pri_resid = torch.norm(ry) + torch.norm(rz)
        dual_resid = torch.norm(rx)
        resid = pri_resid + dual_resid + nineq * mu
        d = z / s
        if verbose:
            print(("primal_res = {0:.5g}, dual_res = {1:.5g}, " +
                   "gap = {2:.5g}, kappa(d) = {3:.5g}").format(
                pri_resid, dual_resid, mu, min(d) / max(d)))
        # if (pri_resid < 5e-4 and dual_resid < 5e-4 and mu < 4e-4):
        # stop when the residual stops improving or is small enough
        improved = (prev_resid is None) or (resid < prev_resid + 1e-6)
        if not improved or resid < 1e-6:
            return x, y, z
        prev_resid = resid
        factor_kkt(U_S, R, d)
        dx_aff, ds_aff, dz_aff, dy_aff = solve_kkt(U_Q, d, G, A, U_S, rx, rs, rz, ry)
        # D = torch.diag((z / s).cpu()).type_as(Q)
        # dx_aff1, ds_aff1, dz_aff1, dy_aff1 = factor_solve_kkt(Q, D, G, A, rx, rs, rz, ry)
        # compute centering directions
        alpha = min(min(get_step(z, dz_aff), get_step(s, ds_aff)), 1.0)
        sig = (torch.dot(s + alpha * ds_aff, z + alpha * dz_aff) / (torch.dot(s, z))) ** 3
        dx_cor, ds_cor, dz_cor, dy_cor = solve_kkt(
            U_Q, d, G, A, U_S,
            torch.zeros(nz).type_as(Q),
            (-mu * sig * torch.ones(nineq).type_as(Q) + ds_aff * dz_aff) / s,
            torch.zeros(nineq).type_as(Q),
            torch.zeros(neq).type_as(Q))
        # dx_cor, ds_cor, dz_cor, dy_cor = factor_solve_kkt(Q, D, G, A,
        #     torch.zeros(nz).type_as(Q),
        #     (-mu * sig * torch.ones(nineq).type_as(Q) + ds_aff * dz_aff) / s,
        #     torch.zeros(nineq).type_as(Q), torch.zeros(neq).type_as(Q))
        # combined (affine + centering/corrector) step
        dx = dx_aff + dx_cor
        ds = ds_aff + ds_cor
        dz = dz_aff + dz_cor
        dy = dy_aff + dy_cor if neq > 0 else None
        alpha = min(1.0, 0.999 * min(get_step(s, ds), get_step(z, dz)))
        dx_norm = torch.norm(dx)
        dz_norm = torch.norm(dz)
        if np.isnan(dx_norm) or dx_norm > 1e5 or dz_norm > 1e5:
            # Overflow, return early
            return x, y, z
        x += alpha * dx
        s += alpha * ds
        z += alpha * dz
        y = y + alpha * dy if neq > 0 else None
    return x, y, z
def get_requirements(opts):
    '''Get the proper requirements file based on the optional argument.

    :param opts: parsed options object; ``opts.dev`` selects the development
        requirements file and ``opts.doc`` the documentation one; otherwise
        plain ``requirements.txt`` is used.
    :return: list of requirement specifiers, with ``==`` pins relaxed to
        ``>=`` and comment/blank lines skipped.
    '''
    if opts.dev:
        name = 'requirements_dev.txt'
    elif opts.doc:
        name = 'requirements_doc.txt'
    else:
        name = 'requirements.txt'
    requirements_file = os.path.join(os.path.dirname(__file__), name)
    # Use a context manager so the file handle is closed deterministically
    # (the original relied on the garbage collector to close it).
    with open(requirements_file) as req_file:
        install_requires = [line.strip().replace('==', '>=')
                            for line in req_file
                            if not line.strip().startswith('#') and line.strip() != '']
    return install_requires
def delete_embedded(self, rel=None, href=lambda _: True):
    """Removes an embedded resource from this document.

    Calling code should use this method to remove embedded resources
    instead of modifying ``embedded`` directly.

    With no arguments, every embedded resource is removed. If ``rel`` is
    given, only resources under that link relationship type are removed.
    ``href`` further narrows the selection: it is either a string compared
    against each resource's ``self`` link, or a callable returning true for
    the ``self`` links of resources to remove.

    Note: when the stored value for ``rel`` is a single dict (not a list),
    the whole relationship entry is removed without applying the ``href``
    filter — this mirrors the original behavior.

    Arguments:
    - ``rel``: optional link relationship type of the resources to remove.
    - ``href``: optional ``self``-link string, or predicate over ``self``
      links, selecting the resources to remove.
    """
    if EMBEDDED_KEY not in self.o:
        return
    embedded = self.o[EMBEDDED_KEY]
    if rel is None:
        # No rel given: recurse once per relationship type present.
        for link_rel in list(embedded.keys()):
            self.delete_embedded(link_rel, href)
        return
    if rel not in embedded:
        return
    url_filter = href if callable(href) else (lambda candidate: candidate == href)
    rel_embeds = embedded[rel]
    if isinstance(rel_embeds, dict):
        # Single embedded resource: drop the whole entry unconditionally.
        del embedded[rel]
        if not embedded:
            del self.o[EMBEDDED_KEY]
        return
    survivors = [item for item in list(rel_embeds)
                 if not url_filter(Document(item, self.base_uri).url())]
    if not survivors:
        del embedded[rel]
    elif len(survivors) == 1:
        embedded[rel] = survivors[0]
    else:
        embedded[rel] = survivors
    if not embedded:
        del self.o[EMBEDDED_KEY]
def detect(self):
    """Detect IP and return it.

    Takes the first entry of ``self.rips``, records it via
    ``set_current_value`` and returns its string form. Returns ``None``
    implicitly when ``self.rips`` is empty.
    """
    for candidate in self.rips:
        ip_text = str(candidate)
        LOG.debug("detected %s", ip_text)
        self.set_current_value(ip_text)
        return ip_text
def check_partial(func, *args, **kwargs):
    """Create a partial to be used by goodtables.

    Returns ``func`` with the given positional and keyword arguments
    pre-bound, carrying over the original function's ``check`` attribute
    (which goodtables inspects for check metadata).
    """
    bound = partial(func, *args, **kwargs)
    bound.check = func.check
    return bound
def as_page(self):
    """Wrap this Tag as a self-contained webpage.

    Produces a page of the following structure:

    .. code-block:: html

        <!DOCTYPE html>
        <html>
          <head>
            <meta http-equiv="Content-type" content="text/html" charset="UTF-8"/>
            {self.resources}
          </head>
          <body>
            {self}
          </body>
        </html>
    """
    builder = HTML()
    charset_meta = builder.meta({'http-equiv': 'Content-type'},
                                content="text/html", charset="UTF-8")
    head = builder.head(charset_meta, *self.resources)
    body = builder.body(self)
    return builder.inline(builder.raw('<!DOCTYPE html>'),
                          builder.html(head, body))
def get_connectivity(self, measure_name, plot=False):
    """Calculate spectral connectivity measure.

    Parameters
    ----------
    measure_name : str
        Name of the connectivity measure to calculate. See :class:`Connectivity`
        for supported measures.
    plot : {False, None, Figure object}, optional
        Whether and where to plot the connectivity. If set to **False**, nothing
        is plotted. Otherwise set to the Figure object. If set to **None**, a
        new figure is created.

    Returns
    -------
    measure : array, shape = [n_channels, n_channels, nfft]
        Values of the connectivity measure.
    fig : Figure object
        Instance of the figure in which was plotted. This is only returned if
        `plot` is not **False**.

    Raises
    ------
    RuntimeError
        If the :class:`Workspace` instance does not contain a fitted VAR model.
    """
    if self.connectivity_ is None:
        raise RuntimeError("Connectivity requires a VAR model (run do_mvarica or fit_var first)")
    # The measure name is looked up as a method on the connectivity object.
    cm = getattr(self.connectivity_, measure_name)()
    # Complex-valued measures are reported as magnitudes.
    cm = np.abs(cm) if np.any(np.iscomplex(cm)) else cm
    if plot is None or plot:
        fig = plot
        if self.plot_diagonal == 'fill':
            diagonal = 0
        elif self.plot_diagonal == 'S':
            # Draw the spectral density S on the diagonal first, then plot the
            # measure with diagonal=-1 so it does not overwrite those axes.
            diagonal = -1
            sm = np.abs(self.connectivity_.S())
            sm /= np.max(sm)
            # scale to 1 since components are scaled arbitrarily anyway
            fig = self.plotting.plot_connectivity_spectrum(sm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=1, border=self.plot_outside_topo, fig=fig)
        else:
            diagonal = -1
        fig = self.plotting.plot_connectivity_spectrum(cm, fs=self.fs_, freq_range=self.plot_f_range, diagonal=diagonal, border=self.plot_outside_topo, fig=fig)
        return cm, fig
    return cm
def enumerate_file_hash(self, url, file_url, timeout=15, headers=None):
    """Gets the MD5 of requests.get(url + file_url).

    @param url: the installation's base URL.
    @param file_url: the url of the file to hash.
    @param timeout: the number of seconds to wait prior to a timeout.
    @param headers: a dictionary to pass to requests.get()
    @return: hex digest of the MD5 of the response body.
    @raise RuntimeError: if the response status code is not 200.
    """
    # None default instead of a shared mutable {} (mutable-default pitfall);
    # behavior is unchanged for callers.
    if headers is None:
        headers = {}
    r = self.session.get(url + file_url, timeout=timeout, headers=headers)
    if r.status_code == 200:
        # Named 'file_hash' so the builtin 'hash' is not shadowed.
        file_hash = hashlib.md5(r.content).hexdigest()
        return file_hash
    else:
        raise RuntimeError("File '%s' returned status code '%s'." % (file_url, r.status_code))
def expiration_extractor(widget, data):
    """Extract expiration information.

    - If active flag not set, account is disabled (value 0).
    - If active flag set and nothing was extracted, UNSET is returned
      (account never expires).
    - If active flag set and a datetime was chosen, the account expires at
      that datetime; a timestamp in seconds since epoch is returned.
    """
    flag = data.request.get('%s.active' % widget.name, '0')
    if not int(flag):
        return 0
    chosen = data.extracted
    if not chosen:
        return UNSET
    return time.mktime(chosen.utctimetuple())
def insertDataset(self, blockcontent, otptIdList, migration=False):
    """Insert the dataset described by a block object into DBS.

    :param blockcontent: block dictionary; the ``dataset``, ``primds``,
        ``acquisition_era`` and ``processing_era`` entries are consumed here.
    :param otptIdList: output-module-config ids, forwarded to
        ``insertDatasetWOannex`` to fill the dataset/config map.
    :param migration: when True, requirements are relaxed for old DBS2 data
        (missing acquisition/processing era tolerated, name-format check
        skipped) and ``create_by`` is not overwritten.
    :return: the dataset id (existing or newly inserted).
    """
    dataset = blockcontent['dataset']
    conn = self.dbi.connection()
    # First, check and see if the dataset exists.
    try:
        datasetID = self.datasetid.execute(conn, dataset['dataset'])
        dataset['dataset_id'] = datasetID
    except KeyError as ex:
        if conn:
            conn.close()
        dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/InsertDataset: Dataset is required.\
        Exception: %s. troubled dataset are: %s" % (ex.args[0], dataset), self.logger.exception, "DBSBlockInsert/InsertDataset: Dataset is required.\
        Exception: %s. troubled dataset are: %s" % (ex.args[0], dataset))
    except Exception as ex1:
        if conn:
            conn.close()
        raise ex1
    if datasetID > 0:
        # Then we already have a valid dataset. We only need to fill the map
        # (dataset & output module config). Skip to the END.
        try:
            self.insertDatasetWOannex(dataset=dataset, blockcontent=blockcontent, otptIdList=otptIdList, conn=conn, insertDataset=False, migration=migration)
        finally:
            if conn:
                conn.close()
        return datasetID
    # Else, we need to do the work.
    # Start a new transaction
    tran = conn.begin()
    primary_ds_name = ''
    try:
        # 1. Deal with primary dataset. Most primary datasets are
        # pre-installed in db.
        primds = blockcontent["primds"]
        primary_ds_name = primds["primary_ds_name"]
        primds["primary_ds_id"] = self.primdsid.execute(conn, primds["primary_ds_name"], transaction=tran)
        if primds["primary_ds_id"] <= 0:
            # primary dataset is not in db yet.
            try:
                primds["primary_ds_id"] = self.sm.increment(conn, "SEQ_PDS")
                primds["creation_date"] = primds.get("creation_date", dbsUtils().getTime())
                if not migration:
                    primds["create_by"] = dbsUtils().getCreateBy()
                self.primdsin.execute(conn, primds, tran)
            except exceptions.IntegrityError as ex:
                # Unique-constraint violation: a concurrent insert may have
                # beaten us, so re-read the id instead of failing outright.
                if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_PDS_PRIMARY_DS_NAME") != -1) or str(ex).lower().find("duplicate") != -1:
                    primds["primary_ds_id"] = self.primdsid.execute(conn, primds["primary_ds_name"], transaction=tran)
                    if primds["primary_ds_id"] <= 0:
                        if tran:
                            tran.rollback()
                        if conn:
                            conn.close()
                        dbsExceptionHandler('dbsException-conflict-data', 'Primary dataset not yet inserted by concurrent insert. ', self.logger.exception, 'Primary dataset not yet inserted by concurrent insert. ' + str(ex))
                elif str(ex).find("ORA-01400") > -1:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler('dbsException-missing-data', 'Missing data when insert primary_datasets. ', self.logger.exception, 'Missing data when insert primary_datasets. ' + str(ex))
                else:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert primary_datasets. ', self.logger.exception, 'Invalid data when insert primary_datasets. ' + str(ex))
            except Exception as ex:
                if tran:
                    tran.rollback()
                if conn:
                    conn.close()
                raise
        dataset['primary_ds_id'] = primds["primary_ds_id"]
        # 2 Deal with processed ds.
        # processed ds is handled inside of dataset insertion; however we need
        # to make sure it is formatted correctly.
        # processed_ds_name is not required to pre-exist in the db; it will be
        # inserted with the dataset if not in yet.
        # processed_ds_name = acquisition_era_name[-filter_name][-processing_str]-vprocessing_version
        # Note [-filter_name] is new as of 4/30/2012. See ticket #3655. YG
        # Although acquisition era and processing version are not required for
        # a dataset in the schema (the schema is built this way to accommodate
        # DBS2 data), the requirement is imposed on the API, so both are
        # required. The format check is done after acquisition/processing eras
        # are dealt with. YG 12/07/2011 TK-362
        # 3 Deal with Acquisition era.
        aq = blockcontent.get('acquisition_era', {})
        has_acquisition_era_name = 'acquisition_era_name' in aq
        has_start_date = 'start_date' in aq

        def insert_acquisition_era():
            # Helper: insert the acquisition era, tolerating a concurrent
            # insert of the same era name.
            try:
                # insert acquisition era into db
                aq['acquisition_era_id'] = self.sm.increment(conn, "SEQ_AQE")
                self.acqin.execute(conn, aq, tran)
                dataset['acquisition_era_id'] = aq['acquisition_era_id']
            except exceptions.IntegrityError as ei:
                # ORA-01400: cannot insert NULL into required columns,
                # usually it is the NULL on start_date.
                if "ORA-01400" in str(ei):
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler("dbsException-invalid-input2", "BlockInsert: acquisition_era_name and start_date are required. \
        NULL was received from user input. Please correct your data.")
                # ok, already in db?
                if (str(ei).find("ORA-00001") != -1 and str(ei).find("TUC_AQE_ACQUISITION_ERA_NAME") != -1) or str(ei).lower().find("duplicate") != -1:
                    dataset['acquisition_era_id'] = self.acqid.execute(conn, aq['acquisition_era_name'])
                    if dataset['acquisition_era_id'] <= 0:
                        if tran:
                            tran.rollback()
                        if conn:
                            conn.close()
                        dbsExceptionHandler("dbsException-invalid-input2", "BlockInsert: \
        Check the spelling of acquisition Era name. The db may already have the same \
        acquisition era, but with different cases.", self.logger.exception, "BlockInsert: \
        Check the spelling of acquisition Era name. The db may already have the same \
        acquisition era, but with different cases.")
                else:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert acquisition_eras . ', self.logger.exception, 'Invalid data when insert acquisition_eras. ' + str(ei))
            except Exception:
                if tran:
                    tran.rollback()
                if conn:
                    conn.close()
                raise

        if has_acquisition_era_name and has_start_date:
            insert_acquisition_era()
        elif migration and not has_acquisition_era_name:
            # if no acquisition era is available, for example for old DBS2
            # data, skip insertion.
            aq['acquisition_era_id'] = None
            dataset['acquisition_era_id'] = None
        elif migration and not aq['start_date']:
            aq['start_date'] = 0
            insert_acquisition_era()
        else:
            if tran:
                tran.rollback()
            if conn:
                conn.close()
            dbsExceptionHandler("dbsException-invalid-input2", "BlockInsert: Acquisition Era is required", self.logger.exception, "BlockInsert: Acquisition Era is required")
        # 4 Deal with Processing era.
        pera = blockcontent.get('processing_era', {})
        if 'processing_version' in pera:
            try:
                # insert processing era into db
                pera['processing_era_id'] = self.sm.increment(conn, "SEQ_PE")
                # pera['processing_version'] = pera['processing_version'].upper()
                self.procsingin.execute(conn, pera, tran)
                dataset['processing_era_id'] = pera['processing_era_id']
            except exceptions.IntegrityError as ex:
                if (str(ex).find("ORA-00001: unique constraint") != -1 and str(ex).find("TUC_PE_PROCESSING_VERSION") != -1) or str(ex).lower().find("duplicate") != -1:
                    # ok, already in db
                    dataset['processing_era_id'] = self.procsingid.execute(conn, pera['processing_version'])
                elif str(ex).find("ORA-01400") > -1:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler('dbsException-missing-data', 'Missing data when insert processing_eras. ', self.logger.exception, 'Missing data when insert Processing_eras. ' + str(ex))
                else:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Processing_ears. ', self.logger.exception, 'Invalid data when insert Processing_eras. ' + str(ex))
            except Exception as ex:
                if tran:
                    tran.rollback()
                if conn:
                    conn.close()
                raise
        elif migration:
            # if no processing era is available, for example for old DBS2
            # data, skip insertion.
            pera['processing_era_id'] = None
            dataset['processing_era_id'] = None
        else:
            if tran:
                tran.rollback()
            if conn:
                conn.close()
            dbsExceptionHandler('dbsException-invalid-input2', 'BlockInsert:processing version is required')
        # Make sure processed_ds_name is the right format:
        # processed_ds_name = acquisition_era_name[-filter_name][-processing_str]-vprocessing_version
        # To accommodate DBS2 data, this check is turned off in migration.
        # This will not cause any problem for non-DBS2 data because it was
        # already checked when inserted into the source dbs. YG 7/12/2012
        if not migration and aq["acquisition_era_name"] != "CRAB" and aq["acquisition_era_name"] != "LHE":
            erals = dataset["processed_ds_name"].rsplit('-')
            if erals[0] != aq["acquisition_era_name"] or erals[len(erals) - 1] != "%s%s" % ("v", pera["processing_version"]):
                if tran:
                    tran.rollback()
                if conn:
                    conn.close()
                dbsExceptionHandler('dbsException-invalid-input2', "BlockInsert:\
        processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified.", self.logger.exception, "BlockInsert: processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified.")
        # So far so good, let's commit the first 4 db activities before going on.
        tran.commit()
    except KeyError as ex:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
        dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
        KeyError exception: %s. " % ex.args[0], self.logger.exception, "DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s." % ex.args[0])
    except:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
        raise
    # Continue for the rest.
    tran = conn.begin()
    try:
        # 5 Deal with physics group.
        phg = dataset['physics_group_name']
        if phg:
            # Yes, the dataset has a physics group.
            phgId = self.phygrpid.execute(conn, phg, transaction=tran)
            if phgId <= 0:
                # not in db yet, insert it
                phgId = self.sm.increment(conn, "SEQ_PG")
                phygrp = {'physics_group_id': phgId, 'physics_group_name': phg}
                try:
                    self.phygrpin.execute(conn, phygrp, tran)
                except exceptions.IntegrityError as ex:
                    if str(ex).find("ORA-00001") != -1 and str(ex).find("PK_PG") != -1:
                        if tran:
                            tran.rollback()
                        if conn:
                            conn.close()
                        dbsExceptionHandler(message='InsertPhysicsGroup Error', logger=self.logger.exception, serverError="InsertPhysicsGroup: " + str(ex))
                    if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_PG_PHYSICS_GROUP_NAME") != -1) or str(ex).lower().find("duplicate") != -1:
                        # concurrent insert of the same group: re-read its id
                        phgId = self.phygrpid.execute(conn, phg, transaction=tran)
                        if phgId <= 0:
                            if tran:
                                tran.rollback()
                            if conn:
                                conn.close()
                            dbsExceptionHandler(message='InsertPhysicsGroup Error ', logger=self.logger.exception, serverError="InsertPhysicsGroup: " + str(ex))
                    elif str(ex).find("ORA-01400") > -1:
                        if tran:
                            tran.rollback()
                        if conn:
                            conn.close()
                        dbsExceptionHandler('dbsException-missing-data', 'Missing data when insert Physics_groups. ', self.logger.exception, 'Missing data when insert Physics_groups. ' + str(ex))
                    else:
                        if tran:
                            tran.rollback()
                        if conn:
                            conn.close()
                        dbsExceptionHandler('dbsException-invalid-input2', 'Invalid data when insert Physics_groups. ', self.logger.exception, 'Invalid data when insert Physics_groups. ' + str(ex))
                except Exception as ex:
                    if tran:
                        tran.rollback()
                    if conn:
                        conn.close()
                    raise
            dataset['physics_group_id'] = phgId
            # self.logger.debug("***PHYSICS_GROUP_ID=%s***" % phgId)
        else:
            # no physics group for the dataset.
            dataset['physics_group_id'] = None
        del dataset['physics_group_name']
        # 6 Deal with data tier. A dataset must have a data tier.
        dataset['data_tier_name'] = dataset['data_tier_name'].upper()
        # We no longer handle Tier inside dataset insert. If a data tier is
        # not in DBS before the dataset is inserted, an error is reported to
        # the user as missing data. See github issue #466. This is to prevent
        # users from inserting random data tiers into phys* DB. YG May-15-2015
        dtId = 0
        dtId = self.tierid.execute(conn, dataset['data_tier_name'])
        # When no data tier is found, it returns tier id -1.
        if dtId <= 0:
            dbsExceptionHandler('dbsException-missing-data', 'Required data tier %s not found in DBS when insert dataset. Ask your admin adding the tier before insert/migrate the block/dataset.' % dataset['data_tier_name'], self.logger.exception, 'Required data tier not found in DBS when insert dataset. ')
        # 7 Deal with dataset access type. A dataset must have a data type.
        dataset['dataset_access_type'] = dataset['dataset_access_type'].upper()
        # handle dataset access type inside dataset insertion with Insert2.
        tran.commit()
    except Exception as ex:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
        raise
    # Before we insert the dataset, we need to make sure
    # dataset = /primary_dataset_name/processed_dataset_name/data_tier
    d2 = dataset['dataset'].rsplit('/')
    if (d2[1] != primary_ds_name or d2[2] != dataset["processed_ds_name"] or d2[3] != dataset['data_tier_name']):
        if tran:
            tran.rollback()
        if conn:
            conn.close()
        dbsExceptionHandler('dbsException-invalid-input2', 'dataset=/primary_dataset_name/processed_dataset_name/data_tier is not matched.', self.logger.exception, 'dataset=/primary_dataset_name/processed_dataset_name/data_tier is not matched.')
    try:
        # self.logger.debug("*** Trying to insert the dataset ***")
        dataset['dataset_id'] = self.insertDatasetWOannex(dataset=dataset, blockcontent=blockcontent, otptIdList=otptIdList, conn=conn, insertDataset=True, migration=migration)
    finally:
        if tran:
            tran.rollback()
        if conn:
            conn.close()
    return dataset['dataset_id']
def Clift(Re):
    r'''Calculates drag coefficient of a smooth sphere using the method in
    [1]_ as described in [2]_.

    .. math::
        C_D = \left\{ \begin{array}{ll}
        \frac{24}{Re} + \frac{3}{16} & \mbox{if $Re < 0.01$}\\
        \frac{24}{Re}(1 + 0.1315Re^{0.82 - 0.05\log Re}) & \mbox{if $0.01 < Re < 20$}\\
        \frac{24}{Re}(1 + 0.1935Re^{0.6305}) & \mbox{if $20 < Re < 260$}\\
        10^{[1.6435 - 1.1242\log Re + 0.1558[\log Re]^2} & \mbox{if $260 < Re < 1500$}\\
        10^{[-2.4571 + 2.5558\log Re - 0.9295[\log Re]^2 + 0.1049[\log Re]^3} & \mbox{if $1500 < Re < 12000$}\\
        10^{[-1.9181 + 0.6370\log Re - 0.0636[\log Re]^2} & \mbox{if $12000 < Re < 44000$}\\
        10^{[-4.3390 + 1.5809\log Re - 0.1546[\log Re]^2} & \mbox{if $44000 < Re < 338000$}\\
        29.78 - 5.3\log Re & \mbox{if $338000 < Re < 400000$}\\
        0.19\log Re - 0.49 & \mbox{if $400000 < Re < 1000000$}\end{array} \right.

    Parameters
    ----------
    Re : float
        Reynolds number of the sphere, [-]

    Returns
    -------
    Cd : float
        Drag coefficient [-]

    Notes
    -----
    Range is Re <= 1E6.
    The 338000 < Re < 400000 piece is ``29.78 - 5.3 log Re`` (the implemented
    value); it is continuous with the preceding branch at Re = 338000, whereas
    a leading coefficient of 9.78 would be discontinuous and negative.

    Examples
    --------
    >>> Clift(200)
    0.7756342422322543

    References
    ----------
    .. [1] R. Clift, J. R. Grace, M. E. Weber, Bubbles, Drops, and Particles,
       Academic, New York, 1978.
    .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
       Ahmadi. "Development of Empirical Models with High Accuracy for
       Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
       Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
       doi:10.1016/j.powtec.2014.02.045.
    '''
    if Re < 0.01:
        Cd = 24. / Re + 3 / 16.
    elif Re < 20:
        Cd = 24. / Re * (1 + 0.1315 * Re ** (0.82 - 0.05 * log10(Re)))
    elif Re < 260:
        Cd = 24. / Re * (1 + 0.1935 * Re ** (0.6305))
    elif Re < 1500:
        Cd = 10 ** (1.6435 - 1.1242 * log10(Re) + 0.1558 * (log10(Re)) ** 2)
    elif Re < 12000:
        Cd = 10 ** (-2.4571 + 2.5558 * log10(Re) - 0.9295 * (log10(Re)) ** 2 + 0.1049 * log10(Re) ** 3)
    elif Re < 44000:
        Cd = 10 ** (-1.9181 + 0.6370 * log10(Re) - 0.0636 * (log10(Re)) ** 2)
    elif Re < 338000:
        Cd = 10 ** (-4.3390 + 1.5809 * log10(Re) - 0.1546 * (log10(Re)) ** 2)
    elif Re < 400000:
        Cd = 29.78 - 5.3 * log10(Re)
    else:
        Cd = 0.19 * log10(Re) - 0.49
    return Cd
def prune_empty_node(node, seen):
    """Recursively remove empty branches and return whether this makes the
    node itself empty.

    A node is non-empty as soon as it has methods, or keeps at least one
    non-empty branch (pruning stops at the first such branch). The ``seen``
    parameter is used to avoid infinite recursion due to cycles (you never
    know): a node already seen is treated as empty.
    """
    if node.methods:
        return False
    if id(node) in seen:
        return True
    visited = seen | {id(node)}
    for child in list(node.branches):
        if not prune_empty_node(child, visited):
            # A surviving branch means this node is not empty; stop here.
            return False
        node.branches.remove(child)
    return True
def trace():
    """trace finds the line, the filename and error message and returns it
    to the user.

    Must be called from inside an ``except`` block so ``sys.exc_info()``
    still refers to the active exception.
    """
    import traceback
    import inspect
    import sys
    exc_tb = sys.exc_info()[2]
    frame_text = traceback.format_tb(exc_tb)[0]
    filename = inspect.getfile(inspect.currentframe())
    # script name + line number
    line = frame_text.split(", ")[1]
    # Get Python syntax error
    synerror = traceback.format_exc().splitlines()[-1]
    return line, filename, synerror
def set_helical_radius(self, filename, bp, atomname='P', full=False, bp_range=True):
    """To read and set local helical radius of both strands.

    Parameters
    ----------
    filename : str
        Input file, which is generated from do_x3dna. e.g. HelixRad_g.dat
    bp : 1D list or array
        base-pairs to analyze
        Example: ::

            bp = [6]                        # bp_range = False
            bp = [4, 15]                    # bp_range = True
            bp = range(4, 15)               # bp_range = False
            bp = np.arange(4, 15)           # bp_range = False
            bp = [2, 5, 6, 7, 9, 12, 18]    # bp_range = False

    atomname : str
        Atom name to consider for the DNA helix (accepted keywords:
        ``P``, ``O4*``, ``O4'``, ``C1*`` and ``C1'``)
    full : bool
        To calculate full helical radius. Overrides atomname option and
        uses atom ``P``, and 1 A is added to the radius calculated by the
        3DNA package.
    bp_range : bool
        ``Default=True``: As shown above, if ``True``, bp is taken as a
        range, otherwise a list or numpy array.
    """
    if not (isinstance(bp, list) or isinstance(bp, np.ndarray)):
        raise AssertionError("type %s is not list or np.ndarray" % type(bp))
    # NOTE(review): an unsupported atomname is only printed ("Exiting") but
    # execution continues with an empty ``parameter`` list — confirm whether
    # this should raise instead.
    if not ((atomname == 'P') or (atomname == 'O4*') or (atomname == 'C1*') or (atomname == 'O4\'') or (atomname == 'C1\'')):
        print('\n This atomname {0} is not implemented... Exiting\n'.format(atomname))
    # Check if requested parameters found within input file
    gotParametersInputFile = checkParametersInputFile(filename)
    if gotParametersInputFile is None:
        raise IOError(' Something wrong in input file {0}.\n Cannot read parameters.\n File should be an output from do_x3dna.'.format(filename))
    for p in helicalRadiusParameters:
        if p not in gotParametersInputFile:
            raise ValueError(' Helical radius not found in input file. \n This file contains following parameters: \n {0}'.format(gotParametersInputFile))
    # Select the (strand 1, strand 2) column pair for the chosen atom type.
    parameter = []
    if (atomname == 'P') or (full):
        parameter = [1, 4]
    if (atomname == 'O4*' or atomname == 'O4\'') and (not full):
        parameter = [2, 5]
    if (atomname == 'C1*' or atomname == 'C1\'') and (not full):
        parameter = [3, 6]
    data, time = read_param_file(filename, [1, 2, 3, 4, 5, 6], bp, bp_range, startBP=self.startBP)
    self._set_time(time)
    bp_idx, param_idx = get_idx_of_bp_parameters(bp, parameter, bp_range, startBP=self.startBP)
    if full:
        # Full helical radius: 1 A is added to the value reported by 3DNA.
        data = np.add(data, 1.0)
    for i in range(len(data)):
        bp_num = str(bp_idx[i] + self.startBP)
        if (atomname == 'P') or (full):
            self._set_data(data[i][0], 'bps', bp_num, 'radius s-1', scaleoffset=1)
            self._set_data(data[i][3], 'bps', bp_num, 'radius s-2', scaleoffset=1)
        if (atomname == 'O4*' or atomname == 'O4\''):
            self._set_data(data[i][1], 'bps', bp_num, 'radius s-1', scaleoffset=1)
            self._set_data(data[i][4], 'bps', bp_num, 'radius s-2', scaleoffset=1)
        if (atomname == 'C1*' or atomname == 'C1\''):
            self._set_data(data[i][2], 'bps', bp_num, 'radius s-1', scaleoffset=1)
            self._set_data(data[i][5], 'bps', bp_num, 'radius s-2', scaleoffset=1)
def create(self, recording_status_callback_event=values.unset, recording_status_callback=values.unset, recording_status_callback_method=values.unset, trim=values.unset, recording_channels=values.unset):
    """Create a new RecordingInstance.

    :param unicode recording_status_callback_event: The recording status changes that should generate a callback
    :param unicode recording_status_callback: The callback URL on each selected recording event
    :param unicode recording_status_callback_method: The HTTP method we should use to call `recording_status_callback`
    :param unicode trim: Whether to trim the silence in the recording
    :param unicode recording_channels: The number of channels that the output recording will be configured with

    :returns: Newly created RecordingInstance
    :rtype: twilio.rest.api.v2010.account.call.recording.RecordingInstance
    """
    form_params = {
        'RecordingStatusCallbackEvent': serialize.map(recording_status_callback_event, lambda e: e),
        'RecordingStatusCallback': recording_status_callback,
        'RecordingStatusCallbackMethod': recording_status_callback_method,
        'Trim': trim,
        'RecordingChannels': recording_channels,
    }
    payload = self._version.create('POST', self._uri, data=values.of(form_params))
    return RecordingInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        call_sid=self._solution['call_sid'],
    )
def columnclean(column):
    """Modifies column header format to be importable into a database.

    :param column: raw column header
    :return: cleanedcolumn: reformatted column header
    """
    # Applied strictly in order, matching the original chained .replace calls.
    substitutions = (
        ('%', 'percent'),
        ('(', '_'),
        (')', ''),
        ('As', 'Adenosines'),
        ('Cs', 'Cytosines'),
        ('Gs', 'Guanines'),
        ('Ts', 'Thymines'),
        ('Ns', 'Unknowns'),
        ('index', 'adapterIndex'),
    )
    cleanedcolumn = str(column)
    for old, new in substitutions:
        cleanedcolumn = cleanedcolumn.replace(old, new)
    return cleanedcolumn
def list_dvportgroups(dvs=None, portgroup_names=None, service_instance=None):
    '''
    Returns a list of distributed virtual switch portgroups.
    The list can be filtered by the portgroup names or by the DVS.

    dvs
        Name of the DVS containing the portgroups.
        Default value is None.

    portgroup_names
        List of portgroup names to look for. If None, all portgroups are
        returned.
        Default value is None

    service_instance
        Service instance (vim.ServiceInstance) of the vCenter.
        Default is None.

    .. code-block:: bash

        salt '*' vsphere.list_dvportgroups
        salt '*' vsphere.list_dvportgroups dvs=dvs1
        salt '*' vsphere.list_dvportgroups portgroup_names=[pg1]
        salt '*' vsphere.list_dvportgroups dvs=dvs1 portgroup_names=[pg1]
    '''
    proxy_type = get_proxy_type()
    if proxy_type == 'esxdatacenter':
        datacenter = __salt__['esxdatacenter.get_details']()['datacenter']
        dc_ref = _get_proxy_target(service_instance)
    elif proxy_type == 'esxcluster':
        datacenter = __salt__['esxcluster.get_details']()['datacenter']
        dc_ref = salt.utils.vmware.get_datacenter(service_instance, datacenter)
    if dvs:
        matches = salt.utils.vmware.get_dvss(dc_ref, dvs_names=[dvs])
        if not matches:
            raise VMwareObjectRetrievalError('DVS \'{0}\' was not '
                                             'retrieved'.format(dvs))
        parent = matches[0]
    else:
        parent = dc_ref
    # Fetch everything when no explicit portgroup names were requested.
    portgroups = salt.utils.vmware.get_dvportgroups(
        parent_ref=parent,
        portgroup_names=portgroup_names,
        get_all_portgroups=not portgroup_names)
    return [_get_dvportgroup_dict(pg_ref) for pg_ref in portgroups]
def get_linode(kwargs=None, call=None):
    '''
    Returns data for a single named Linode.

    name
        The name of the Linode for which to get data. Can be used instead
        ``linode_id``. Note this will induce an additional API call
        compared to using ``linode_id``.

    linode_id
        The ID of the Linode for which to get data. Can be used instead of
        ``name``.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f get_linode my-linode-config name=my-instance
        salt-cloud -f get_linode my-linode-config linode_id=1234567
    '''
    if call == 'action':
        raise SaltCloudSystemExit('The get_linode function must be called with -f or --function.')
    params = {} if kwargs is None else kwargs
    name = params.get('name', None)
    linode_id = params.get('linode_id', None)
    if name is None and linode_id is None:
        raise SaltCloudSystemExit('The get_linode function requires either a \'name\' or a \'linode_id\'.')
    if linode_id is None:
        # Resolving by name costs an extra API round-trip.
        linode_id = get_linode_id_from_name(name)
    result = _query('linode', 'list', args={'LinodeID': linode_id})
    return result['DATA'][0]
def get(self):
    """Parse this response into a string and reset temporary containers.

    :return: The parsed response message
    :rtype: str
    """
    self._log.debug('Converting Response object to string format')
    message = ''.join(str(part) for part in self._response).strip()
    self._log.debug('Resetting parent Trigger temporary containers')
    self.stars = {'normalized': (), 'case_preserved': (), 'raw': ()}
    user = self.trigger.user
    self.trigger.user = None
    if not self.redirect:
        return message
    # Redirect: feed the rendered message back in as a new request.
    self._log.info('Redirecting response to: {msg}'.format(msg=message))
    groups = self.agentml.request_log.most_recent().groups
    message = self.agentml.get_reply(user.id, message, groups)
    if message:
        return message
    self._log.info('Failed to retrieve a valid response when redirecting')
    return ''
def parse_xml_boundary(xml_region):
    """Get the geographic bounds from an XML element.

    Args:
        xml_region (Element): The <region> tag as XML Element
    Returns:
        GeographicBB:
    Raises:
        MapSourceException: if a boundary child is missing or non-numeric.
    """
    try:
        # Element.getchildren() was deprecated and removed in Python 3.9;
        # iterating the element directly yields the same child elements.
        bounds = {child.tag: float(child.text) for child in xml_region}
        return GeographicBB(
            min_lon=bounds["west"], max_lon=bounds["east"],
            min_lat=bounds["south"], max_lat=bounds["north"],
        )
    except (KeyError, ValueError):
        raise MapSourceException("region boundaries are invalid. ")
def from_string(cls, constraint):
    """Build a constraint from its string representation.

    :param str constraint: The string representation of a constraint
    :rtype: :class:`MarathonConstraint`
    """
    parts = constraint.split(':')
    parsed = cls.from_json(parts)
    if not parsed:
        raise ValueError("Invalid string format. " "Expected `field:operator:value`")
    return parsed
def iter_column(self, query=None, field="_id", **kwargs):
    """Return one field's values as an iterator.

    Beware that if the query returns records where the field is not set,
    iteration will raise a KeyError.
    """
    # Project only the requested field (duplicate-key trick keeps the
    # original behavior when field == "_id": the later True wins).
    projection = {"_id": False, field: True}
    cursor = self._collection_with_options(kwargs).find(query, projection=projection)
    # We only want one field: bypass the ORM
    patch_cursor(cursor, **kwargs)
    return (dotdict(doc)[field] for doc in cursor)
def _read_bits(cls, raw_value):
    """Generator that serves bitfields (MSB first) from a byte sequence.

    Prime the generator with ``send(None)``, then each ``send(k)`` returns
    the next *k* bits as an int. The generator ends when the bytes run out.
    """
    source = iter(raw_value)
    acc = 0      # buffered bits, right-aligned
    avail = 0    # number of valid bits currently in ``acc``
    out = 0
    while True:
        wanted = yield out
        # Refill one byte at a time until the request can be served.
        while avail < wanted:
            try:
                acc = (acc << 8) | int(next(source))
            except StopIteration:
                return
            avail += 8
        shift = avail - wanted
        out = int(acc >> shift)
        acc &= (1 << shift) - 1
        avail = shift
def echo(message=None, file=None, nl=True, err=False, color=None, carriage_return=False):
    """Patched click echo function that can append explicit carriage returns."""
    text = message or ""
    if carriage_return:
        # Emit the line ending ourselves and suppress click's own newline.
        suffix = "\r\n" if nl else "\r"
        click_echo(text + suffix, file, False, err, color)
    else:
        click_echo(text, file, nl, err, color)
def pow2(x: int, p: int) -> int:
    """Equivalent to ``pow(x, 2 ** p, q)`` via repeated squaring modulo
    the module-level ``q``."""
    for _ in range(p):
        x = x * x % q
    return x
def extra_downloader_converter(value):
    """Parses extra_{downloader,converter} arguments.

    Parameters
    ----------
    value : iterable or str
        A string is split on spaces into a list; anything else is
        returned unchanged.
    """
    return value.split(" ") if isinstance(value, six.string_types) else value
def pckcov(pck, idcode, cover):
    """Find the coverage window for a specified reference frame in a
    specified binary PCK file.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pckcov_c.html

    :param pck: Name of PCK file.
    :type pck: str
    :param idcode: Class ID code of PCK reference frame.
    :type idcode: int
    :param cover: Window giving coverage in pck for idcode.
    :type cover: SpiceCell
    """
    assert isinstance(cover, stypes.SpiceCell)
    assert cover.dtype == 1
    pck_p = stypes.stringToCharP(pck)
    idcode_c = ctypes.c_int(idcode)
    libspice.pckcov_c(pck_p, idcode_c, ctypes.byref(cover))
async def start(self):
    """Start api initialization.

    Returns True when a token was obtained and device/user setup ran,
    False when authentication failed.
    """
    _LOGGER.debug('Initializing pyEight Version: %s', __version__)
    await self.fetch_token()
    if self._token is None:
        # We couldn't authenticate
        return False
    await self.fetch_device_list()
    await self.assign_users()
    return True
def dot_v2(vec1, vec2):
    """Return the dot product of two 2-D vectors (objects with x/y attributes)."""
    return sum(getattr(vec1, axis) * getattr(vec2, axis) for axis in ("x", "y"))
def and_next(e):
    """Create a PEG function for positive lookahead."""
    def match_and_next(s, grm=None, pos=0):
        # Probe the sub-expression without consuming input.
        try:
            e(s, grm, pos)
        except PegreError:
            raise PegreError('Positive lookahead failed', pos)
        # Lookahead succeeded: return an ignorable, zero-width match.
        return PegreResult(s, Ignore, (pos, pos))
    return match_and_next
def app_start(name, profile, **kwargs):
    """Start an application with the specified profile.

    Does nothing if the application is already running.
    """
    ctx = Context(**kwargs)
    node_service = ctx.repo.create_secure_service('node')
    ctx.execute_action('app:start', node=node_service, name=name, profile=profile)
def get_access_token(self, method='POST', decoder=parse_utf8_qsl, key='access_token', **kwargs):
    '''Returns an access token.

    :param method: HTTP method to use, defaults to `POST`.
    :type method: str
    :param decoder: Function used to parse the Response content; should
        return a dictionary.
    :type decoder: func
    :param key: Key under which the access token is found, defaults to
        'access_token'.
    :type key: str
    :param \\*\\*kwargs: Optional arguments, same as Requests.
    :type \\*\\*kwargs: dict
    '''
    response = self.get_raw_access_token(method, **kwargs)
    # process_token_request returns a 1-tuple here; unpack it.
    (access_token,) = process_token_request(response, decoder, key)
    return access_token
def serialisasi(self):
    """Return a dictionary serialisation of this Entri object.

    :returns: Serialised dictionary
    :rtype: dict
    """
    hasil = {
        "nama": self.nama,
        "nomor": self.nomor,
        "kata_dasar": self.kata_dasar,
        "pelafalan": self.pelafalan,
        "bentuk_tidak_baku": self.bentuk_tidak_baku,
        "varian": self.varian,
        # Each meaning serialises itself recursively.
        "makna": [m.serialisasi() for m in self.makna],
    }
    return hasil
def _set_slave_enabled(self, dpid, port, enabled):
    """Set whether the slave interface at ``port`` of datapath ``dpid``
    is enabled. No-op when no such slave is registered."""
    target = self._get_slave(dpid, port)
    if target:
        target['enabled'] = enabled
def _excel2num(x):
    """Convert an Excel column name like 'AB' to a 0-based column index.

    Parameters
    ----------
    x : str
        The Excel column name to convert.

    Returns
    -------
    int
        The 0-based column index.

    Raises
    ------
    ValueError
        If part of the column name is not in A-Z.
    """
    total = 0
    for ch in x.upper().strip():
        if not "A" <= ch <= "Z":
            raise ValueError("Invalid column name: {x}".format(x=x))
        # Base-26 accumulation where A=1 ... Z=26.
        total = total * 26 + (ord(ch) - ord("A") + 1)
    return total - 1
def _contribute_to_class(self, mcs_args: McsArgs) -> None:
    """Where the magic happens. Takes one parameter, the :class:`McsArgs` of the
    class-under-construction, and processes the declared ``class Meta`` from
    it (if any). We fill ourself with the declared meta options' name/value pairs,
    give the declared meta options a chance to also contribute to the class-under-
    construction, and finally replace the class-under-construction's ``class Meta``
    with this populated factory instance (aka ``self``).
    """
    self._mcs_args = mcs_args
    # Pop the user-declared Meta so it doesn't leak into the final class dict.
    Meta = mcs_args.clsdict.pop('Meta', None)  # type: Type[object]
    # Meta options already resolved on the base classes (if any).
    base_classes_meta = mcs_args.getattr('Meta', None)  # type: MetaOptionsFactory
    # Install this factory instance as the class's Meta.
    mcs_args.clsdict['Meta'] = self
    # must come before _fill_from_meta, because
    # some meta options may depend upon having
    # access to the values of earlier meta options
    self._fill_from_meta(Meta, base_classes_meta, mcs_args)
    # Let each option contribute to the class-under-construction.
    for option in self._get_meta_options():
        option_value = getattr(self, option.name, None)
        option.contribute_to_class(mcs_args, option_value)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None, separator='&'):
    """URL encode a dict/`MultiDict`. If a value is `None` it will not appear
    in the result string. Per default only values are encoded into the target
    charset strings. If `encode_keys` is set to ``True`` unicode keys are
    supported too.

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys.
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting. For more details
        check out the :func:`sorted` documentation.
    """
    pairs = _url_encode_impl(obj, charset, encode_keys, sort, key)
    return separator.join(pairs)
def jacobian_from_model(model, as_functions=False):
    """Build a :class:`~symfit.core.fit.CallableModel` representing the Jacobian of
    ``model``.

    This function makes sure the chain rule is correctly applied for
    interdependent variables.

    :param model: Any symbolical model-type.
    :param as_functions: If `True`, the result is returned using
        :class:`sympy.core.function.Function` where needed, e.g.
        ``{y(x, a): a * x}`` instead of ``{y: a * x}``.
    :return: :class:`~symfit.core.fit.CallableModel` representing the Jacobian
        of ``model``.
    """
    # Inverse dict so we can turn functions back into vars in the end
    functions_as_vars = dict((v, k) for k, v in model.vars_as_functions.items())
    # Create the jacobian components. The `vars` here in the model_dict are
    # always of the type D(y, a), but the righthand-side might still contain
    # functions instead of vars depending on the value of `as_functions`.
    jac = {}
    for func, expr in model.function_dict.items():
        for param in model.params:
            target = D(func, param)
            dfdp = expr.diff(param)
            if as_functions:
                jac[_partial_subs(target, functions_as_vars)] = dfdp
            else:
                # Turn Function objects back into Variables.
                dfdp = dfdp.subs(functions_as_vars, evaluate=False)
                jac[_partial_subs(target, functions_as_vars)] = dfdp
    # Next lines are needed for the Hessian, where the components of model still
    # contain functions instead of vars.
    if as_functions:
        jac.update(model)
    else:
        jac.update({y: expr.subs(functions_as_vars, evaluate=False) for y, expr in model.items()})
    jacobian_model = CallableModel(jac)
    return jacobian_model
def start_batch(job, input_args):
    """This function will administer 5 jobs at a time then recursively call itself until subset is empty"""
    samples = parse_sra(input_args['sra'])
    # for analysis_id in samples:
    # NOTE(review): the docstring claims 5 jobs at a time with recursion, but a
    # single child job receives the full sample list (the per-sample loop is
    # commented out above) — confirm which behavior is intended.
    job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
def _callback(self):
    """The actual callback fired on every scheduler tick: run tasks, then decide
    whether to keep going, remind the user, or shut the scheduler down.

    :return: the number of exceptions recorded so far (``len(self.exceptions)``).
    """
    if self.debug:
        # Show the number of open file descriptors
        print(">>>>> _callback: Number of open file descriptors: %s" % get_open_fds())
    self._runem_all()
    # Mission accomplished. Shutdown the scheduler.
    all_ok = self.flow.all_ok
    if all_ok:
        return self.shutdown(msg="All tasks have reached S_OK. Will shutdown the scheduler and exit")
    # Handle failures.
    err_lines = []
    # Shall we send a reminder to the user?
    delta_etime = self.get_delta_etime()
    if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
        self.num_reminders += 1
        msg = ("Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s " % (self.pid, self.flow, delta_etime))
        retcode = self.send_email(msg, tag="[REMINDER]")
        if retcode:
            # Cannot send mail, shutdown now!
            msg += ("\nThe scheduler tried to send an e-mail to remind the user\n" + " but send_email returned %d. Error is not critical though!" % retcode)
            print(msg)
            # err_lines.append(msg)
    # if delta_etime.total_seconds() > self.max_etime_s:
    #     err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)
    # Too many exceptions. Shutdown the scheduler.
    if self.num_excs > self.max_num_pyexcs:
        msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (self.num_excs, self.max_num_pyexcs)
        err_lines.append(boxed(msg))
    # Paranoid check: disable the scheduler if we have submitted
    # too many jobs (it might be due to some bug or other external reasons
    # such as race conditions between difference callbacks!)
    if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
        msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (self.nlaunch, self.flow.num_tasks)
        err_lines.append(boxed(msg))
    # Count the number of tasks with status == S_ERROR.
    if self.flow.num_errored_tasks > self.max_num_abierrs:
        msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (self.flow.num_errored_tasks, self.max_num_abierrs)
        err_lines.append(boxed(msg))
    # Test on the presence of deadlocks.
    g = self.flow.find_deadlocks()
    if g.deadlocked:
        # Check the flow again so that status are updated.
        self.flow.check_status()
        g = self.flow.find_deadlocks()
        # print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables, "\nrunning\n", g.running)
        print("deadlocked:", len(g.deadlocked), ", runnables:", len(g.runnables), ", running:", len(g.running))
        if g.deadlocked and not g.runnables and not g.running:
            err_lines.append("No runnable job with deadlocked tasks:\n%s." % str(g.deadlocked))
    if not g.runnables and not g.running:
        # Check the flow again so that status are updated.
        self.flow.check_status()
        g = self.flow.find_deadlocks()
        if not g.runnables and not g.running:
            err_lines.append("No task is running and cannot find other tasks to submit.")
    # Something wrong. Quit
    if err_lines:
        # Cancel all jobs.
        if self.killjobs_if_errors:
            cprint("killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.", "yellow")
            try:
                num_cancelled = 0
                for task in self.flow.iflat_tasks():
                    num_cancelled += task.cancel()
                cprint("Killed %d tasks" % num_cancelled, "yellow")
            except Exception as exc:
                cprint("Exception while trying to kill jobs:\n%s" % str(exc), "red")
        self.shutdown("\n".join(err_lines))
    return len(self.exceptions)
def execute(self, input_data):
    '''Execute the ViewMemoryDeep worker.

    Aggregates the ``tables`` output of every dependency worker into the
    base ``view_memory`` result and returns it.
    '''
    # Aggregate the output from all the memory workers, clearly this could be kewler
    output = input_data['view_memory']
    output['tables'] = {}
    for data in [input_data[key] for key in ViewMemoryDeep.dependencies]:
        # dict.iteritems() is Python 2 only (removed in Python 3);
        # .items() works on both.
        for name, table in data['tables'].items():
            output['tables'][name] = table
    return output
def _add_access_token_to_response(self, response, access_token):
    # type: (oic.message.AccessTokenResponse, se_leg_op.access_token.AccessToken) -> None
    """Adds the Access Token and the associated parameters to the Token Response."""
    fields = (
        ('access_token', access_token.value),
        ('token_type', access_token.type),
        ('expires_in', access_token.expires_in),
    )
    for field, value in fields:
        response[field] = value
def synchronized(lock):
    """Synchronization decorator; provide thread-safe locking on a function.

    http://code.activestate.com/recipes/465057/
    """
    def wrap(f):
        def synchronize(*args, **kw):
            # ``with`` acquires the lock and guarantees release even on error.
            with lock:
                return f(*args, **kw)
        return synchronize
    return wrap
def name(self):
    """User name (the same name as on the user's community profile page).

    :rtype: str
    """
    uid = self.user_id
    # Own profile uses the dedicated "my name" lookup.
    if uid == self._iface_user.get_id():
        return self._iface.get_my_name()
    return self._iface.get_name(uid)
def print_help(self, *args, **kwargs):
    """Add pager support to help output."""
    command = self._command
    # No command context or pager disallowed: plain help output.
    if command is None or not command.session.allow_pager:
        return super().print_help(*args, **kwargs)
    desc = 'Help\: %s' % '-'.join(self.prog.split())
    pager_kwargs = command.get_pager_spec()
    with paging.pager_redirect(desc, **pager_kwargs):
        return super().print_help(*args, **kwargs)
def get_language(language_name):
    """Returns a callable that instantiates the meta-model for the given language."""
    entry_points = list(pkg_resources.iter_entry_points(group=LANG_EP, name=language_name))
    if not entry_points:
        raise TextXError('Language "{}" is not registered.'.format(language_name))
    if len(entry_points) > 1:
        # Multiple languages defined with the same name
        raise TextXError('Language "{}" registered multiple times:\n{}'.format(language_name, "\n".join([l.dist for l in entry_points])))
    return entry_points[0].load()()
def calculate_tensor_to_probability_map_output_shapes(operator):
    '''Allowed input/output patterns are
        ONNX < 1.2
            1. [1, C] ---> A map
            2. [1, C_1, ..., C_n] ---> A map
        ONNX >= 1.2
            1. [N, C] ---> A sequence of maps
            2. [N, C_1, ..., C_n] ---> A sequence of maps

    Note that N must be 1 currently if you're using ONNX < 1.2 because old ZipMap doesn't produce a sequence of maps.
    If the input is not [N, C], it will be reshaped into [N, C_1 x C_2 x ... x C_n] before being fed into ONNX ZipMap.
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    check_input_and_output_types(operator, good_input_types=[FloatTensorType])
    # Only neural-network classifiers carry class labels here.
    model_type = operator.raw_operator.WhichOneof('Type')
    if model_type == 'neuralNetworkClassifier':
        class_label_type = operator.raw_operator.neuralNetworkClassifier.WhichOneof('ClassLabels')
    else:
        raise TypeError('%s has no class label' % model_type)
    N = operator.inputs[0].type.shape[0]
    doc_string = operator.outputs[0].type.doc_string
    if class_label_type == 'stringClassLabels':
        # Pre-opset-7 ZipMap emits a single map; newer opsets emit a sequence of maps.
        if operator.target_opset < 7:
            operator.outputs[0].type = DictionaryType(StringTensorType([1]), FloatTensorType([1]), doc_string)
        else:
            operator.outputs[0].type = SequenceType(DictionaryType(StringTensorType([]), FloatTensorType([])), N, doc_string)
    elif class_label_type == 'int64ClassLabels':
        if operator.target_opset < 7:
            operator.outputs[0].type = DictionaryType(Int64TensorType([1]), FloatTensorType([1]), doc_string)
        else:
            operator.outputs[0].type = SequenceType(DictionaryType(Int64TensorType([]), FloatTensorType([])), N, doc_string)
    else:
        raise ValueError('Unsupported label type')
def populate(self, source=DEFAULT_SEGMENT_SERVER, segments=None, pad=True, **kwargs):
    """Query the segment database for this flag's active segments.

    This method assumes the flag's metadata (minimally
    :attr:`~DataQualityFlag.name` and :attr:`~DataQualityFlag.known`)
    have been filled. Segments are fetched from ``source`` with any
    :attr:`~DataQualityFlag.padding` applied on-the-fly; this
    `DataQualityFlag` is modified in-place.

    Parameters
    ----------
    source : `str`
        URL of a segment database or path to a file on disk.
    segments : `SegmentList`, optional
        segments during which to query; defaults to this flag's
        existing known segments.
    pad : `bool`, optional, default: `True`
        apply the `~DataQualityFlag.padding` associated with this flag.
    **kwargs
        passed to :meth:`DataQualityFlag.query` or
        :meth:`DataQualityFlag.read`.

    Returns
    -------
    self : `DataQualityFlag`
        a reference to this flag
    """
    # Delegate to the dict implementation by wrapping this single flag.
    container = DataQualityDict()
    container[self.name] = self
    container.populate(source=source, segments=segments, pad=pad, **kwargs)
    return container[self.name]
def file_put_contents(self, path, data):
    """Put passed contents into the file located at ``path``, recording
    do/undo actions so the write can be rolled back."""
    full_path = self.get_full_file_path(path)
    if os.path.isfile(full_path):
        # Existing file: stash a temp copy so the write can be rolled back.
        backup = self.new_tmp()
        self.do_action({'do': ['copy', full_path, backup], 'undo': ['move', backup, full_path]})
    self.do_action({'do': ['write', full_path, data], 'undo': ['backup', full_path]})
def batch_remove_retrain(nmask_train, nmask_test, X_train, y_train, X_test, y_test, attr_train, attr_test, model_generator, metric):
    """An approximation of holdout that only retrains the model once.

    This is also called ROAR (RemOve And Retrain) in work by Google. It is much more
    computationally efficient than the holdout method because it masks the most
    important features in every sample and then retrains the model once, instead of
    retraining the model for every test sample like the holdout metric.
    """
    warnings.warn("The retrain based measures can incorrectly evaluate models in some cases!")
    X_train, X_test = to_array(X_train, X_test)
    # how many features to mask
    assert X_train.shape[1] == X_test.shape[1]
    # mask nmask top features for each explanation
    X_train_tmp = X_train.copy()
    X_train_mean = X_train.mean(0)
    # tiny deterministic noise breaks ties between equal attribution values
    tie_breaking_noise = const_rand(X_train.shape[1]) * 1e-6
    for i in range(len(y_train)):
        if nmask_train[i] > 0:
            # feature indices sorted by decreasing attribution
            ordering = np.argsort(-attr_train[i, :] + tie_breaking_noise)
            # masked features are replaced by the training-set mean
            X_train_tmp[i, ordering[:nmask_train[i]]] = X_train_mean[ordering[:nmask_train[i]]]
    X_test_tmp = X_test.copy()
    for i in range(len(y_test)):
        if nmask_test[i] > 0:
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i, ordering[:nmask_test[i]]] = X_train_mean[ordering[:nmask_test[i]]]
    # train the model with all the given features masked
    model_masked = model_generator()
    model_masked.fit(X_train_tmp, y_train)
    yp_test_masked = model_masked.predict(X_test_tmp)
    return metric(y_test, yp_test_masked)
def encode(precision, with_z):
    """Given GeoJSON on stdin, writes a geobuf file to stdout."""
    logger = logging.getLogger('geobuf')
    stdin = click.get_text_stream('stdin')
    sink = click.get_binary_stream('stdout')
    try:
        data = json.load(stdin)
        pbf = geobuf.encode(
            data,
            precision if precision >= 0 else 6,  # negative precision -> default 6
            3 if with_z else 2,                  # 3-D when a z coordinate is kept
        )
        sink.write(pbf)
    except Exception:
        logger.exception("Failed. Exception caught")
        sys.exit(1)
    # SystemExit is not an Exception subclass, so exiting here is
    # equivalent to the original exit inside the try block.
    sys.exit(0)
def count_star(self) -> int:
    """Implements the ``COUNT(*)`` specialization."""
    # Strip ORDER BY: it is meaningless for an aggregate-only query.
    query = self.statement.with_only_columns([func.count()]).order_by(None)
    return self.session.execute(query).scalar()
def connectTo(self, node, cls=None):
    """Creates a connection between this node and the inputed node.

    :param node: <XNode>
    :param cls: <subclass of XNodeConnection> || None
    :return: <XNodeConnection> or None when no node was given
    """
    if not node:
        return
    connection = self.scene().addConnection(cls)
    connection.setOutputNode(self)
    connection.setInputNode(node)
    return connection
def sum(data, start=0):
    """sum(data [, start]) -> value

    Return a high-precision sum of the given numeric data. If optional
    argument ``start`` is given, it is added to the total. If ``data`` is
    empty, ``start`` (defaulting to 0) is returned.
    """
    # Track exact values as {denominator: sum-of-numerators} pairs.
    n, d = exact_ratio(start)
    T = type(start)
    partials = {d: n}
    # map {denominator: sum of numerators}
    # Micro-optimizations: bind lookups to locals for the hot loop.
    coerce_types_ = coerce_types
    exact_ratio_ = exact_ratio
    partials_get = partials.get
    # Add numerators for each denominator, and track the "current" type.
    for x in data:
        T = coerce_types_(T, type(x))
        n, d = exact_ratio_(x)
        partials[d] = partials_get(d, 0) + n
    if None in partials:
        # NaN/inf values are stored under denominator None; only
        # float/Decimal results can represent them.
        assert issubclass(T, (float, Decimal))
        assert not isfinite(partials[None])
        return T(partials[None])
    # Sum the exact fractions, smallest denominator first.
    total = Fraction()
    for d, n in sorted(partials.items()):
        total += Fraction(n, d)
    if issubclass(T, int):
        assert total.denominator == 1
        return T(total.numerator)
    if issubclass(T, Decimal):
        return T(total.numerator) / total.denominator
    return T(total)
def push_to_topic(dst, dat, qos=0, retain=False, cfgs=None):
    """Publish payload ``dat`` to topic ``dst``.

    :param cfgs: broker connection settings (hostname, port, username, password)
    :param dst: destination topic
    :param dat: payload to publish
    :param qos: MQTT quality-of-service level
    :param retain: whether the broker should retain the message
    :return: True on success, False on failure
    """
    connection = {
        'hostname': cfgs['hostname'],
        'port': cfgs['port'],
        'username': cfgs['username'],
        'password': cfgs['password'],
    }
    message = {'topic': dst, 'payload': dat, 'qos': qos, 'retain': retain}
    try:
        Publisher(connection).run(message)
    except Exception as err:
        log.error('push to amqt: {}'.format(err))
        return False
    return True
def reorderbydf(df2, df1):
    """Reorder rows of a dataframe to match another dataframe's row order.

    :param df2: input dataframe
    :param df1: template dataframe whose index order is applied
    :return: ``df2`` rows selected in ``df1``'s index order
    """
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and the
    # row-by-row loop was O(n^2); label-based selection does the same reorder
    # in one vectorized step.
    return df2.loc[df1.index, :]
def _find_home_or_away(self, row):
    """Determine whether the player is on the home or away team.

    Next to every player is their school's name; matching it against the
    previously parsed home team's name tells us which side the player is on.

    Parameters
    ----------
    row : PyQuery object
        A single row in a boxscore table for a single player.

    Returns
    -------
    str
        The HOME or AWAY constant.
    """
    team = row('td[data-stat="team"]').text().upper()
    return HOME if team == self.home_abbreviation.upper() else AWAY
def generate_harvestable_catalogs(self, catalogs, harvest='all', report=None, export_path=None):
    """Filter the supplied catalogs according to the ``harvest`` criterion.

    Args:
        catalogs (str, dict or list): One (str or dict) or several (list of
            strs and/or dicts) catalogs.
        harvest (str): Criterion deciding which datasets to keep from each
            catalog ('all', 'none', 'valid' or 'report').
        report (list or str): Report table produced by
            generate_datasets_report(), as a list of dicts or an XLSX/CSV
            file. Only used when ``harvest == 'report'``.
        export_path (str): Path to a JSON file or a directory where the
            filtered catalogs are exported. Ending in ".json" exports the
            whole list to a single file; a directory gets one JSON per
            catalog. When ``export_path`` is given, nothing is returned.

    Returns:
        list of dicts: List of catalogs.
    """
    assert isinstance(catalogs, string_types + (dict, list))
    # A single catalog is wrapped into a one-element list.
    if isinstance(catalogs, string_types + (dict,)):
        catalogs = [catalogs]
    harvestable_catalogs = [readers.read_catalog(c) for c in catalogs]
    # Remember the original URL of each catalog (None when passed as a dict).
    catalogs_urls = [catalog if isinstance(catalog, string_types) else None for catalog in catalogs]
    # Apply the harvest criteria.
    if harvest == 'all':
        pass
    elif harvest == 'none':
        for catalog in harvestable_catalogs:
            catalog["dataset"] = []
    elif harvest == 'valid':
        # 'valid' is implemented as 'report' over a freshly generated report.
        report = self.generate_datasets_report(catalogs, harvest)
        return self.generate_harvestable_catalogs(catalogs=catalogs, harvest='report', report=report, export_path=export_path)
    elif harvest == 'report':
        if not report:
            raise ValueError("""
Usted eligio 'report' como criterio de harvest, pero no proveyo un valor para
el argumento 'report'. Por favor, intentelo nuevamente.""")
        datasets_to_harvest = self._extract_datasets_to_harvest(report)
        for idx_cat, catalog in enumerate(harvestable_catalogs):
            catalog_url = catalogs_urls[idx_cat]
            # Keep only the datasets listed (by catalog URL + title) in the report.
            if ("dataset" in catalog and isinstance(catalog["dataset"], list)):
                catalog["dataset"] = [dataset for dataset in catalog["dataset"] if (catalog_url, dataset.get("title")) in datasets_to_harvest]
            else:
                catalog["dataset"] = []
    else:
        raise ValueError("""
{} no es un criterio de harvest reconocido. Pruebe con 'all', 'none', 'valid' o
'report'.""".format(harvest))
    # Export / return the harvestable catalogs.
    if export_path and os.path.isdir(export_path):
        # One JSON file per catalog.
        for idx, catalog in enumerate(harvestable_catalogs):
            filename = os.path.join(export_path, "catalog_{}".format(idx))
            writers.write_json(catalog, filename)
    elif export_path:
        # A single JSON containing every catalog.
        writers.write_json(harvestable_catalogs, export_path)
    else:
        return harvestable_catalogs
def uuid(uuid_value=None):
    """Returns a uuid value that is valid to use for id and identity fields.

    :param uuid_value: optional existing value; validated when given,
        otherwise a fresh UUID4 is generated.
    :return: unicode uuid object if using UUIDFields, uuid unicode string
        otherwise.
    :raises ValueError: if ``uuid_value`` is given but is not a valid UUID4.
    """
    # NOTE(review): this function's own name shadows the module-level ``uuid``
    # module, so ``uuid.uuid4()`` would resolve to the function object and fail
    # with AttributeError — a local aliased import avoids the collision.
    import uuid as uuid_module
    if uuid_value:
        if not validate_uuid(uuid_value):
            raise ValueError("uuid_value must be a valid UUID version 4 object")
    else:
        uuid_value = uuid_module.uuid4()
    if versions_settings.VERSIONS_USE_UUIDFIELD:
        return uuid_value
    return six.u(str(uuid_value))
def _federation_indicators(catalog, central_catalog, identifier_search=False):
    """Count how many datasets appear both in ``catalog`` and in the central
    catalog, and build federation indicators from that information.

    Args:
        catalog (dict): already-parsed catalog.
        central_catalog (str or dict): path to the central catalog, or a dict
            with the catalog already parsed.
        identifier_search (bool): match datasets by identifier instead of
            title/publisher when True.

    Returns:
        dict: federation indicator counts/percentages and dataset lists;
            count fields stay ``None`` if the central catalog cannot be read.
    """
    result = {'datasets_federados_cant': None, 'datasets_federados_pct': None, 'datasets_no_federados_cant': None, 'datasets_federados_eliminados_cant': None, 'distribuciones_federadas_cant': None, 'datasets_federados_eliminados': [], 'datasets_no_federados': [], 'datasets_federados': [], }
    try:
        central_catalog = readers.read_catalog(central_catalog)
    except Exception as e:
        # Unreadable central catalog: log and return the empty skeleton.
        msg = u'Error leyendo el catálogo central: {}'.format(str(e))
        logger.error(msg)
        return result
    generator = FederationIndicatorsGenerator(central_catalog, catalog, id_based=identifier_search)
    result.update({'datasets_federados_cant': generator.datasets_federados_cant(), 'datasets_no_federados_cant': generator.datasets_no_federados_cant(), 'datasets_federados_eliminados_cant': generator.datasets_federados_eliminados_cant(), 'datasets_federados_eliminados': generator.datasets_federados_eliminados(), 'datasets_no_federados': generator.datasets_no_federados(), 'datasets_federados': generator.datasets_federados(), 'datasets_federados_pct': generator.datasets_federados_pct(), 'distribuciones_federadas_cant': generator.distribuciones_federadas_cant()})
    return result
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.