signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def meta_retrieve(self, meta_lookahead=None):
    """Get metadata from the query itself. This is guaranteed to only
    return a Python dictionary.

    Note that if the query failed, the metadata might not be in JSON
    format, in which case there may be additional, non-JSON data
    which can be retrieved using ``raw_meta = req.raw.value``.

    :return: A dictionary containing the query metadata
    """
    if not self.__meta_received:
        # Metadata only becomes available once every row has been read.
        if not (meta_lookahead or self.meta_lookahead):
            raise RuntimeError('This property only valid once all rows are received!')
        # Drain the remaining rows so the trailing metadata gets parsed.
        self.buffered_remainder = list(self)
    payload = self.raw.value
    return payload if isinstance(payload, dict) else {}
def chunks(l: List[Any], n: int) -> Iterable[List[Any]]:
    """Yield successive ``n``-sized chunks from ``l``.

    Args:
        l: input list
        n: chunk size

    Yields:
        successive chunks of size ``n`` (the final chunk may be shorter)
    """
    offsets = range(0, len(l), n)
    for offset in offsets:
        yield l[offset:offset + n]
def encode(message, encoding_type='default', letter_sep=' ' * 3, strip=True):
    """Convert a message string into Morse code.

    There are two kinds of marks: the short mark, dot (.) or "dit", and the
    long mark, dash (-) or "dah".  Between letters there is a short gap
    (three units); between words a medium gap (seven units).

    In the ``binary`` encoding the dot is ``1`` and the dash ``111``; the
    intra-character gap is ``0``, the short gap ``000`` and the medium gap
    ``00000``.

    Parameters:
        message: the text to encode
        encoding_type: ``default`` (morse) or ``binary``
        letter_sep: separator placed between letters (default encoding only)
        strip: when true, remove leading/trailing whitespace first

    Returns:
        the encoded message string

    Raises:
        NotImplementedError: for an unsupported ``encoding_type``
    """
    if strip:
        # No trailing or leading spaces before encoding.
        message = message.strip()
    allowed_encoding_type = ['default', 'binary']
    normalized = encoding_type.lower()
    if normalized == 'binary':
        return _encode_to_binary_string(message, on='1', off='0')
    if normalized == 'default':
        return _encode_to_morse_string(message, letter_sep)
    raise NotImplementedError("encoding_type must be in %s" % allowed_encoding_type)
def nla_put_u64(msg, attrtype, value):
    """Add 64 bit integer attribute to Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L638

    Positional arguments:
    msg -- Netlink message (nl_msg class instance).
    attrtype -- attribute type (integer).
    value -- numeric value to store as payload (int() or c_uint64()).

    Returns:
    0 on success or a negative error code.
    """
    # Accept either a ready-made c_uint64 or a plain int to wrap.
    if isinstance(value, c_uint64):
        payload = bytearray(value)
    else:
        payload = bytearray(c_uint64(value))
    return nla_put(msg, attrtype, SIZEOF_U64, payload)
def hwaddr_interfaces():
    '''Provide a dict of the connected interfaces and their
    hw addresses (Mac Address)'''
    # Provides:
    #   hwaddr_interfaces
    interfaces = _get_interfaces()
    hwaddrs = {
        name: info['hwaddr']
        for name, info in interfaces.items()
        if 'hwaddr' in info
    }
    return {'hwaddr_interfaces': hwaddrs}
def DOM_describeNode(self, **kwargs):
    """Function path: DOM.describeNode
    Domain: DOM
    Method name: describeNode

    Optional arguments:
        'nodeId' (type: NodeId) -> Identifier of the node.
        'backendNodeId' (type: BackendNodeId) -> Identifier of the backend node.
        'objectId' (type: Runtime.RemoteObjectId) -> JavaScript object id of the node wrapper.
        'depth' (type: integer) -> The maximum depth at which children should be retrieved, defaults to 1.
        'pierce' (type: boolean) -> Whether iframes and shadow roots should be traversed (default is false).

    Returns:
        'node' (type: Node) -> Node description.

    Description: Describes node given its id; does not require the domain to be
    enabled and does not start tracking any objects, so it can be used for
    automation.
    """
    # Validate the typed optional arguments up front.
    type_checks = (
        ('depth', (int,),
         "Optional argument 'depth' must be of type '['int']'. Received type: '%s'"),
        ('pierce', (bool,),
         "Optional argument 'pierce' must be of type '['bool']'. Received type: '%s'"),
    )
    for arg_name, arg_types, message in type_checks:
        if arg_name in kwargs:
            assert isinstance(kwargs[arg_name], arg_types), message % type(kwargs[arg_name])
    expected = ['nodeId', 'backendNodeId', 'objectId', 'depth', 'pierce']
    passed_keys = list(kwargs.keys())
    assert all(key in expected for key in passed_keys), (
        "Allowed kwargs are ['nodeId', 'backendNodeId', 'objectId', 'depth', 'pierce']. "
        "Passed kwargs: %s" % passed_keys)
    return self.synchronous_command('DOM.describeNode', **kwargs)
def __set_hard_hard_constraints(self, tdata1, tdata2, seeds):
    """it works with seed labels:
    0: nothing
    1: object 1 - full seeds
    2: object 2 - full seeds
    3: object 1 - not a training seeds
    4: object 2 - not a training seeds"""
    # Voxels seeded as object 1: make them maximally expensive in tdata2
    # and free in tdata1, forcing the assignment to object 1.
    obj1_mask = (seeds == 1) | (seeds == 3)
    tdata2[obj1_mask] = np.max(tdata2) + 1
    tdata1[obj1_mask] = 0
    # Mirror the same trick for object-2 seeds.  np.max(tdata1) is taken
    # *after* the object-1 update above, matching the original ordering.
    obj2_mask = (seeds == 2) | (seeds == 4)
    tdata1[obj2_mask] = np.max(tdata1) + 1
    tdata2[obj2_mask] = 0
    return tdata1, tdata2
def _get_data(self, id_list, format='MLDataset'):
    """Returns the data, from all modalities, for a given list of IDs.

    :param id_list: IDs of the samples to retrieve (must be keys of every
        modality's data dict).
    :param format: 'MLDataset'/'pyradigm' for a dataset container, or
        'ndarray'/'data_matrix' for a plain numpy matrix (case-insensitive).
    :return: list with one subset (matrix or dataset) per modality
    :raises ValueError: for an unrecognized format
    """
    format = format.lower()
    features = list()
    # returning a dict would be better if AutoMKL() can handle it
    for modality, data in self._modalities.items():
        if format in ('ndarray', 'data_matrix'):
            # Turn the dict of arrays into a data matrix (n_samples x n_features).
            # BUG FIX: itemgetter with a single key returns the bare element
            # rather than a 1-tuple, which silently dropped the sample axis
            # for single-ID requests -- handle that case explicitly.
            if len(id_list) == 1:
                only_id = next(iter(id_list))
                subset = np.array([data[only_id]])
            else:
                subset = np.array(itemgetter(*id_list)(data))
        elif format in ('mldataset', 'pyradigm'):
            # Get a container with fake data, then inject the actual features.
            subset = self._dataset.get_subset(id_list)
            subset.data = {id_: data[id_] for id_ in id_list}
        else:
            raise ValueError('Invalid output format - choose only one of '
                             'MLDataset or data_matrix')
        features.append(subset)
    return features
def save(self, inplace=True):
    """Saves all modification to the task on the server.

    :param inplace Apply edits on the current instance or get a new one.
    :return: Task instance.
    """
    # NOTE(review): `inplace` is accepted but never consulted below -- the
    # server response is always wrapped in a brand-new Task instance.
    # Confirm whether in-place semantics were ever implemented.
    modified_data = self._modified_data()
    if bool(modified_data):
        task_request_data = {}
        # 'inputs' and 'execution_settings' need dedicated serialization,
        # so they are split out of the generic modified-field payload.
        inputs = modified_data.pop('inputs', None)
        execution_settings = modified_data.pop('execution_settings', None)
        task_request_data.update(modified_data)
        if inputs:
            task_request_data['inputs'] = self._serialize_inputs(inputs)
        if execution_settings:
            task_request_data['execution_settings'] = (
                self._serialize_execution_settings(execution_settings))
        extra = {'resource': self.__class__.__name__,
                 'query': {'id': self.id, 'data': task_request_data}}
        logger.info('Saving task', extra=extra)
        # PATCH only the modified fields, then rebuild a Task from the
        # server's authoritative response.
        data = self._api.patch(url=self._URL['get'].format(id=self.id),
                               data=task_request_data).json()
        task = Task(api=self._api, **data)
        return task
    # NOTE(review): with no pending modifications this implicitly returns
    # None, despite the ":return: Task instance." contract -- verify callers.
def number_text_lines(text):
    r"""Prefix every line of ``text`` with its 1-based line number.

    Args:
        text (str): input text

    Returns:
        str: text_with_lineno - string with numbered lines
    """
    numbered = []
    for lineno, line in enumerate(text.splitlines(), start=1):
        numbered.append('%2d >>> %s' % (lineno, line))
    return '\n'.join(numbered)
def flush(self):
    """Flush all streams."""
    # Best-effort: each flush/fsync failure is deliberately swallowed so a
    # broken stream never prevents flushing the other one.
    for stream in (self.__logFileStream, self.__stdout):
        if stream is None:
            continue
        try:
            stream.flush()
        except:
            pass
        try:
            os.fsync(stream.fileno())
        except:
            pass
def maybe_convert_ix(*args):
    """We likely want to take the cross-product"""
    # Only build the open-mesh index when every argument is array-like;
    # otherwise return the arguments untouched.
    all_arraylike = all(
        isinstance(arg, (np.ndarray, list, ABCSeries, Index)) for arg in args
    )
    return np.ix_(*args) if all_arraylike else args
def _retrieve_device_cache(proxy=None):
    '''Loads the network device details if not cached already.'''
    global DEVICE_CACHE
    if DEVICE_CACHE:
        # Already populated -- reuse it.
        return DEVICE_CACHE
    if proxy and salt.utils.napalm.is_proxy(__opts__):
        # proxy var passed and running as a NAPALM-type proxy minion
        if 'napalm.get_device' in proxy:
            DEVICE_CACHE = proxy['napalm.get_device']()
    elif not proxy and salt.utils.napalm.is_minion(__opts__):
        # no proxy var and running in a straight minion
        DEVICE_CACHE = salt.utils.napalm.get_device(__opts__)
    return DEVICE_CACHE
def sample_distinct(self, n_to_sample, **kwargs):
    """Sample a sequence of items from the pool until a minimum number of
    distinct items are queried.

    Parameters
    ----------
    n_to_sample : int
        number of distinct items to sample. If sampling with replacement,
        this number is not necessarily the same as the number of
        iterations.
    """
    # Items whose cached label is still NaN have never been queried.
    n_notsampled = np.sum(np.isnan(self.cached_labels_))
    if n_notsampled == 0:
        raise Exception("All distinct items have already been sampled.")
    if n_to_sample > n_notsampled:
        warnings.warn("Only {} distinct item(s) have not yet been sampled."
                      " Setting n_to_sample = {}.".format(n_notsampled, n_notsampled))
        n_to_sample = n_notsampled
    n_distinct = 0
    while n_distinct < n_to_sample:
        self.sample(1, **kwargs)
        # _queried_oracle flags whether the latest draw hit a previously
        # unseen item; `* 1` coerces the boolean to an int increment.
        n_distinct += self._queried_oracle[self.t_ - 1] * 1
def _begin_validation(
        session: UpdateSession,
        loop: asyncio.AbstractEventLoop,
        downloaded_update_path: str,
        robot_name: str) -> asyncio.futures.Future:
    """Start the validation process."""
    session.set_stage(Stages.VALIDATING)
    # Run the (blocking) validation in the default executor so the event
    # loop stays responsive while progress is reported via set_progress.
    validation_future = asyncio.ensure_future(
        loop.run_in_executor(None, validate_update,
                             downloaded_update_path, session.set_progress))

    def _on_validation_done(fut):
        error = fut.exception()
        if error:
            session.set_error(getattr(error, 'short', str(type(error))), str(error))
        else:
            # On success, hand the extracted images to the write stage.
            rootfs_file, bootfs_file = fut.result()
            loop.call_soon_threadsafe(_begin_write, session, loop,
                                      rootfs_file, robot_name)

    validation_future.add_done_callback(_on_validation_done)
    return validation_future
def get_epit_vintage_matrix(self, mnemonic, date_from='1951-01-01', date_to=None):
    """Construct the vintage matrix for a given economic series.

    Requires subscription to Thomson Reuters Economic Point-in-Time (EPiT).

    The vintage matrix is a DataFrame whose columns correspond to a
    particular reported period (quarter or month) and whose index holds the
    timestamps at which those values were released by the official agency,
    i.e. every row contains all values reported up to that date.

    For example, ``DWE.get_epit_vintage_matrix('USGDP...D',
    date_from='2015-01-01')`` shows that the advance GDP estimate for
    2015-Q1 (column 2015-02-15) was released on 2015-04-29 with value
    16304.80 (B USD); the first revision (16264.10) appeared on 2015-05-29
    and the second (16287.70) on 2015-06-24.  On 2015-07-30 the advance
    figure for 2015-Q2 was released (16270.400) together with an update of
    the 2015-Q1 value (16177.30), and so on.
    """
    # Earliest date at which the first release (REL1) is available.
    rel1 = self.fetch(mnemonic, 'REL1', date_from=date_from, date_to=date_to)
    first_date = rel1.dropna().index[0]
    # All dates on which any of the first three releases happened.
    release_dates = self.fetch(mnemonic, ['RELD1', 'RELD2', 'RELD3'],
                               date_from=first_date,
                               date_to=date_to).dropna(how='all')
    # Fetch the vintage snapshot as of each release date.
    vintages = {}
    for release_date in release_dates.index:
        try:
            snapshot = self.fetch(mnemonic, 'RELV', date_from=first_date,
                                  date_to=release_date).dropna()
        except DatastreamException:
            # No retrievable vintage for this date -- skip it.
            continue
        vintages[release_date] = snapshot
    return pd.concat(vintages).RELV.unstack()
def _logger(self):
    """Create a logger to be used between processes.

    :returns: Logging instance.
    """
    log = logging.getLogger(self.NAME)
    log.setLevel(self.LOG_LEVEL)
    handler = logging.StreamHandler(sys.stdout)
    # ANSI green prefix for the level/location header, reset before the message.
    fmt = ('\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'
           '%(lineno)d %(asctime)s\033[0m| %(message)s')
    handler.setFormatter(logging.Formatter(fmt))
    log.addHandler(handler)
    return log
def abspath(path, ref=None):
    """Create an absolute path.

    Parameters
    ----------
    path : str
        absolute or relative path with respect to `ref`
    ref : str or None
        reference path if `path` is relative

    Returns
    -------
    path : str
        absolute path

    Raises
    ------
    ValueError
        if an absolute path cannot be constructed
    """
    full = os.path.join(ref, path) if ref else path
    if not os.path.isabs(full):
        raise ValueError("expected an absolute path but got '%s'" % full)
    return full
def _delete_os_nwk(self, tenant_id, tenant_name, direc, is_fw_virt=False):
    """Delete the network created in Openstack.

    Function to delete Openstack network, It also releases the associated
    segmentation, VLAN and subnets.

    :param tenant_id: tenant whose firewall network is being removed
    :param tenant_name: tenant name, used only for error logging
    :param direc: which side to delete, 'in' or anything else for 'out'
    :param is_fw_virt: when True, skip releasing VLAN/segment/subnet
        (presumably because a virtual firewall manages them -- TODO confirm)
    :return: True on success, False on any failure
    """
    serv_obj = self.get_service_obj(tenant_id)
    fw_dict = serv_obj.get_fw_dict()
    fw_id = fw_dict.get('fw_id')
    # NOTE(review): fw_data_dict is unpacked but never used below.
    fw_data, fw_data_dict = self.get_fw(fw_id)
    if fw_data is None:
        LOG.error("Unable to get fw_data for tenant %s", tenant_name)
        return False
    # Pick the in-side or out-side network/segment/subnet by direction.
    if direc == 'in':
        net_id = fw_data.in_network_id
        seg, vlan = self.get_in_seg_vlan(tenant_id)
        subnet_dict = self.get_in_ip_addr(tenant_id)
    else:
        net_id = fw_data.out_network_id
        seg, vlan = self.get_out_seg_vlan(tenant_id)
        subnet_dict = self.get_out_ip_addr(tenant_id)
    # Delete the Openstack Network
    sub = subnet_dict.get('subnet')
    try:
        ret = self.os_helper.delete_network_all_subnets(net_id)
        if not ret:
            LOG.error("Delete network for ID %(net)s direct %(dir)s "
                      "failed", {'net': net_id, 'dir': direc})
            return False
    except Exception as exc:
        LOG.error("Delete network for ID %(net)s direct %(dir)s failed"
                  " Exc %(exc)s", {'net': net_id, 'dir': direc, 'exc': exc})
        return False
    # Release the segment, VLAN and subnet allocated -- only after the
    # network itself was deleted successfully.
    if not is_fw_virt:
        self.service_vlans.release_segmentation_id(vlan)
        self.service_segs.release_segmentation_id(seg)
        self.release_subnet(sub, direc)
    # Release the network DB entry
    self.delete_network_db(net_id)
    return True
def format(self, result):
    """Generate plain text report.

    :param result: mapping of section name -> list of row dicts
    :return: Report body
    :rtype: str
    """
    m = self.meta
    # Report header: underlined title plus run metadata.
    lines = ['-' * len(self.TITLE),
             self.TITLE,
             '-' * len(self.TITLE),
             "Compared: {db1} <-> {db2}".format(**m),
             "Filter: {filter}".format(**m),
             "Run time: {start_time} -- {end_time} ({elapsed:.1f} sec)".format(**m),
             ""]
    for section in result.keys():
        lines.append("* " + section.title())
        indent = " " * 4
        if len(result[section]) == 0:
            lines.append("{}EMPTY".format(indent))
        else:
            # Rows may have heterogeneous key sets; render one sub-table per
            # distinct key set, with per-column max widths.
            keyset, maxwid = self.result_subsets(result[section])
            for columns in keyset:
                ocol = self.ordered_cols(columns, section)
                mw = maxwid[columns]
                mw_i = [columns.index(c) for c in ocol]  # reorder indexes
                fmt = ' '.join(["{{:{:d}s}}".format(mw[i]) for i in mw_i])
                lines.append("")
                lines.append(indent + fmt.format(*ocol))
                # BUG FIX: sum(mw) / 2 is a float in Python 3 and
                # str * float raises TypeError -- use integer division.
                lines.append(indent + '-_' * (sum(mw) // 2 + len(columns)))
                rows = result[section]
                self.sort_rows(rows, section)
                for r in rows:
                    key = tuple(sorted(r.keys()))
                    if key == columns:
                        values = [str(r[k]) for k in ocol]
                        lines.append(indent + fmt.format(*values))
    return '\n'.join(lines)
def format_table(table, align='<', format='{:.3g}', colwidth=None,
                 maxwidth=None, spacing=2, truncate=0, suffix="..."):
    """Formats a table represented as an iterable of iterables into a nice
    big string suitable for printing.

    Parameters:
        align : string or list of strings
            Alignment of cell contents. Each character in a string specifies
            the alignment of one column: ``<`` left (default), ``^`` center,
            ``>`` right. The last alignment is repeated for unspecified
            columns. If it's a list of strings, each string specifies the
            alignment of one row; the last string is used repeatedly for
            unspecified rows.
        format : string/function, or (nested) list of string/function
            Formats cell contents using the given function(s) or format
            string(s). A flat list gives per-column entries (last repeated);
            a list of lists gives per-row entries (last sub-list repeated).
        colwidth : int, list of ints or None
            Width of each column (last repeated for unspecified columns).
            If ``None`` the width is fitted to the contents.
        maxwidth : int or None
            Maximum table width. Defaults to terminal width minus 1. If the
            table would be wider, one column is truncated.
        spacing : int
            Spacing between columns.
        truncate : int
            Which column to truncate if the width would exceed ``maxwidth``.
            Beware that no column may end up with zero or negative width:
            e.g. maxwidth 80 with colwidth [10, 30, 30, 30] and spacing 2
            totals 106, so 26 is removed from the truncated column; with
            ``truncate=0`` column 0 would get width -16, which raises.
    """
    # Deep-copy so the caller's table (and its rows) are never mutated.
    table = list(deepcopy(table))
    # Normalize align to a per-row list, and format to a list of per-row
    # lists; the last entry of each is reused for unspecified rows/columns.
    if not isinstance(align, list):
        align = [align]
    if not isinstance(format, list):
        format = [format]
    if not isinstance(format[0], list):
        format = [format]
    num_cols = len(table[0])
    if len(set([len(row) for row in table])) > 1:
        raise ValueError("All rows must have the same number of columns")
    # Render every cell to a string using its row/column format entry.
    for i in range(len(table)):
        table[i] = list(table[i])
        colformat = format[min(i, len(format) - 1)]
        for j, cell in enumerate(table[i]):
            f = colformat[min(j, len(colformat) - 1)]
            if isinstance(f, str):
                fun = lambda x: f.format(x)
            else:
                fun = f
            try:
                table[i][j] = fun(cell)
            except:
                # Fall back to str() for cells the formatter cannot handle
                # (e.g. a numeric format applied to a header string).
                table[i][j] = str(cell)
    # Fit column widths to contents when not given explicitly.
    if colwidth == None:
        cellwidth = [[len(cell) for cell in row] for row in table]
        colwidth = list(map(max, zip(*cellwidth)))
    elif not isinstance(colwidth, list):
        colwidth = [colwidth]
    colwidth.extend([colwidth[-1]] * (num_cols - len(colwidth)))
    if maxwidth == None:
        maxwidth = get_terminal_size().columns - 1
    # Shrink the designated column if the table would overflow maxwidth.
    width = sum(colwidth) + spacing * (num_cols - 1)
    if width > maxwidth:
        colwidth[truncate] -= (width - maxwidth)
    for j, cw in enumerate(colwidth):
        if cw < 1:
            raise RuntimeError("Column {} in format_table() has width {}. "
                               "Make sure all columns have width >0. "
                               "Read docstring for further details."
                               .format(j, cw))
    # Assemble the output row by row.
    s = ''
    for i, row in enumerate(table):
        if i != 0:
            s += "\n"
        colalign = align[min(i, len(align) - 1)]
        colformat = format[min(i, len(format) - 1)]
        for j, col in enumerate(row):
            a = colalign[min(j, len(colalign) - 1)]
            f = colformat[min(j, len(colformat) - 1)]
            w = colwidth[j]
            if j != 0:
                s += ' ' * spacing
            # NOTE(review): format_fit/format_time are project helpers not
            # visible here -- presumably format_time pretty-prints durations
            # and format_fit pads/truncates to width w with `suffix`; confirm.
            s += format_fit(format_time(col), w, a, suffix)
    return s
def delegate_to_method(mtd):
    """Create a simplification rule that delegates the instantiation to the
    method `mtd` of the operand (if defined)"""
    def _delegate_to_method(cls, ops, kwargs):
        assert len(ops) == 1
        operand = ops[0]
        # Only delegate when the operand actually defines the method.
        if not hasattr(operand, mtd):
            return ops, kwargs
        return getattr(operand, mtd)()
    return _delegate_to_method
def get_context(self, context):
    """Return a tag or a list of tags context encoded.

    Scans self.tagList for the first context-encoded atomic tag or
    opening/closing group whose tag number equals `context`.  Returns the
    matching Tag, a TagList of the group's inner tags, or None when no
    context-encoded element is found at all.

    :raises InvalidTag: on unbalanced open/close tags, or when a
        context-encoded group with a different tag number is encountered.
    """
    # forward pass
    i = 0
    while i < len(self.tagList):
        tag = self.tagList[i]
        # skip application stuff
        if tag.tagClass == Tag.applicationTagClass:
            pass
        # check for context encoded atomic value
        elif tag.tagClass == Tag.contextTagClass:
            if tag.tagNumber == context:
                return tag
        # check for context encoded group
        elif tag.tagClass == Tag.openingTagClass:
            keeper = tag.tagNumber == context
            rslt = []
            i += 1
            lvl = 0
            # Collect everything up to the matching closing tag, tracking
            # nesting depth; the outer closing tag itself is not appended.
            while i < len(self.tagList):
                tag = self.tagList[i]
                if tag.tagClass == Tag.openingTagClass:
                    lvl += 1
                elif tag.tagClass == Tag.closingTagClass:
                    lvl -= 1
                    if lvl < 0:
                        break
                rslt.append(tag)
                i += 1
            # make sure everything balances
            if lvl >= 0:
                raise InvalidTag("mismatched open/close tags")
            # get everything we need?
            if keeper:
                return TagList(rslt)
            else:
                # NOTE(review): a well-balanced group whose tag number does
                # not match `context` raises rather than being skipped --
                # confirm this is the intended protocol behavior.
                raise InvalidTag("unexpected tag")
        # try the next tag
        i += 1
    # nothing found
    return None
def disco_loop(opc, version, queue, real_out, dup_lines=False, show_bytes=False):
    """Disassembles a queue of code objects.  If we discover another code
    object in co_consts, it is appended to the queue.

    Note that the order of code discovery is first-encountered order, which
    is not amenable to a disassembler format where code objects should be
    defined before being used in other functions.  However this is not
    recursive and will overall lead to less memory consumption at run time.
    """
    while queue:
        code_obj = queue.popleft()
        # The module-level code object gets no extra header.
        if code_obj.co_name not in ('<module>', '?'):
            real_out.write("\n" + format_code_info(code_obj, version) + "\n")
        disassembly = Bytecode(code_obj, opc, dup_lines=dup_lines)
        real_out.write(disassembly.dis(show_bytes=show_bytes) + "\n")
        # Queue up any nested code objects for later disassembly.
        for const in code_obj.co_consts:
            if iscode(const):
                queue.append(const)
def standardize(self, x):
    """Apply the normalization configuration to a batch of inputs.

    # Arguments
        x: batch of inputs to be normalized.

    # Returns
        The inputs, normalized.
    """
    # Shared message for every "configured but not fitted" warning; the
    # rendered text is identical to the historical per-branch literals.
    warn_template = ("This ImageDataGenerator specifies `{}`, but it hasn't "
                     "been fit on any training data. Fit it first by calling "
                     "`.fit(numpy_data)`.")
    if self.preprocessing_function:
        x = self.preprocessing_function(x)
    if self.rescale:
        x *= self.rescale
    if self.samplewise_center:
        x -= np.mean(x, keepdims=True)
    if self.samplewise_std_normalization:
        x /= np.std(x, keepdims=True) + 1e-7
    if self.featurewise_center:
        if self.mean is not None:
            x -= self.mean
        else:
            warnings.warn(warn_template.format('featurewise_center'))
    if self.featurewise_std_normalization:
        if self.std is not None:
            x /= (self.std + 1e-7)
        else:
            warnings.warn(warn_template.format('featurewise_std_normalization'))
    if self.zca_whitening:
        if self.principal_components is not None:
            flat = np.reshape(x, (-1, np.prod(x.shape[-3:])))
            x = np.reshape(np.dot(flat, self.principal_components), x.shape)
        else:
            warnings.warn(warn_template.format('zca_whitening'))
    return x
def _import_matplotlib():
    """Import matplotlib safely."""
    # The Agg backend must be selected before anything matplotlib-dependent
    # is imported, otherwise the backend cannot be changed reliably.
    import matplotlib
    matplotlib.use('agg')
    backend = matplotlib.get_backend().lower()
    if backend != 'agg':
        raise ValueError("Sphinx-Gallery relies on the matplotlib 'agg' backend to "
                         "render figures and write them to files. You are "
                         "currently using the {} backend. Sphinx-Gallery will "
                         "terminate the build now, because changing backends is "
                         "not well supported by matplotlib. We advise you to move "
                         "sphinx_gallery imports before any matplotlib-dependent "
                         "import. Moving sphinx_gallery imports at the top of "
                         "your conf.py file should fix this issue".format(backend))
    import matplotlib.pyplot as plt
    return matplotlib, plt
def to_string(self):
    """Convert SitemapIndex into a string."""
    root = etree.Element('sitemapindex', nsmap={None: SITEMAP_NS})
    for entry in self.sitemaps:
        node = etree.SubElement(root, 'sitemap')
        etree.SubElement(node, 'loc').text = entry.url
        lastmod = entry.lastmod
        # Accept either a date/datetime-like object or a pre-formatted string.
        if hasattr(lastmod, 'strftime'):
            etree.SubElement(node, 'lastmod').text = lastmod.strftime('%Y-%m-%d')
        elif isinstance(lastmod, str):
            etree.SubElement(node, 'lastmod').text = lastmod
    return etree.tostring(root, pretty_print=True, xml_declaration=True,
                          encoding='utf-8')
def is_singlesig(privkey_info, blockchain='bitcoin', **blockchain_opts):
    """Is the given private key bundle a single-sig key bundle?"""
    # Only the Bitcoin blockchain is supported at the moment.
    if blockchain != 'bitcoin':
        raise ValueError('Unknown blockchain "{}"'.format(blockchain))
    return btc_is_singlesig(privkey_info, **blockchain_opts)
def map_over_glob(fn, path, pattern):
    """map a function over a glob pattern, relative to a directory"""
    matches = glob.glob(os.path.join(path, pattern))
    return list(map(fn, matches))
def get_default_candidate(self, component):
    """Gets the default local candidate for the specified component."""
    # Candidates are scanned lowest-priority-first; the first match for the
    # component wins, None when the component has no candidate.
    matching = (c for c in sorted(self._local_candidates, key=lambda c: c.priority)
                if c.component == component)
    return next(matching, None)
def _check_cmdline(data):
    '''In some cases where there are an insane number of processes being
    created on a system, a PID can get recycled or assigned to a non-Salt
    process.  On Linux this checks /proc to make sure the PID being checked
    actually belongs to a Salt process.  For non-Linux systems we punt and
    just return True.'''
    if not salt.utils.platform.is_linux():
        # No /proc to inspect -- assume the PID is ours.
        return True
    pid = data.get('pid')
    if not pid:
        return False
    if not os.path.isdir('/proc'):
        # /proc not mounted: cannot verify, assume valid.
        return True
    path = os.path.join('/proc/{0}/cmdline'.format(pid))
    if not os.path.isfile(path):
        return False
    try:
        with salt.utils.files.fopen(path, 'rb') as proc_file:
            if b'salt' in proc_file.read():
                return True
    except (OSError, IOError):
        return False
def cleanup_defenses(self):
    """Cleans up all data about defense work in current round."""
    print_header('CLEANING UP DEFENSES DATA')
    ancestor = self.datastore_client.key('WorkType', 'AllDefenses')
    # Collect every classification batch plus every defense work entity.
    keys_to_delete = [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'ClassificationBatch')
    ]
    keys_to_delete += [
        e.key
        for e in self.datastore_client.query_fetch(kind=u'Work', ancestor=ancestor)
    ]
    self._cleanup_keys_with_confirmation(keys_to_delete)
def build_play(self, pbp_row):
    """Parses a table row from RTSS (rows tagged ``<tr class='evenColor' ...>``).

    The result contains :py:class:`nhlscrapi.games.playbyplay.Strength` and
    :py:class:`nhlscrapi.games.events.EventType` objects.  Returned play data
    has the form::

        'play_num':    num_of_play
        'period':      curr_period
        'strength':    strength_enum
        'time':        {'min': min, 'sec': sec}
        'vis_on_ice':  {'player_num': player}
        'home_on_ice': {'player_num': player}
        'event':       event_object

    :param pbp_row: table row from RTSS
    :returns: play data
    :rtype: dict
    """
    d = pbp_row.findall('./td')
    # Column positions differ by season; ColMap resolves name -> index.
    c = PlayParser.ColMap(self.season)
    p = {}
    # (removed an unused local `to_dig` lambda -- `to_int` is what is used)
    p['play_num'] = to_int(d[c["play_num"]].text, 0)
    p['period'] = to_int(d[c["per"]].text, 0)
    p['strength'] = self.__strength(d[c["str"]].text)
    time = d[c["time"]].text.split(":")
    p['time'] = {"min": int(time[0]), "sec": int(time[1])}
    # On-ice skater tables may be absent (e.g. stoppages) -> empty dict.
    skater_tab = d[c["vis"]].xpath("./table")
    p['vis_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else {}
    skater_tab = d[c["home"]].xpath("./table")
    p['home_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else {}
    p['event'] = event_type_mapper(
        d[c["event"]].text,
        period=p['period'],
        skater_ct=len(p['vis_on_ice']) + len(p['home_on_ice']),
        game_type=self.game_type)
    # Re-encode the description defensively, replacing non-ASCII characters.
    p['event'].desc = " ".join(
        t.encode('ascii', 'replace').decode('utf-8')
        for t in d[c["desc"]].xpath("text()"))
    parse_event_desc(p['event'], season=self.season)
    return p
def _get_max_size(parts, size=1):
    """Given a list of parts, find the maximum number of commands
    contained in it.

    NOTE(review): the `size` parameter is accepted but unused; kept for
    interface compatibility.
    """
    group_sizes = [len(part) for part in parts if isinstance(part, list)]
    max_group_size = max(group_sizes) if group_sizes else 0
    return max_group_size * _get_magic_size(parts)
def input_files(self):
    """List the input files"""
    mets = self.workspace.mets
    return mets.find_files(fileGrp=self.input_file_grp, pageId=self.page_id)
def set_connection(self, service_name, to_cache):
    """Sets a connection class within the cache.

    :param service_name: The service a given ``Connection`` talks to. Ex.
        ``sqs``, ``sns``, ``dynamodb``, etc.
    :type service_name: string

    :param to_cache: The class to be cached for the service.
    :type to_cache: class
    """
    service_entry = self.services.setdefault(service_name, {})
    service_entry['connection'] = to_cache
def convert_python_regex_to_ecma(value, flags=[]):
    """Convert Python regex to ECMA 262 regex.

    If given value is already ECMA regex it will be returned unchanged.

    :param string value: Python regex.
    :param list flags: List of flags (allowed flags: `re.I`, `re.M`)
    :return: ECMA 262 regex
    :rtype: str
    """
    if is_ecma_regex(value):
        return value
    ecma_flags = ''.join(PYTHON_TO_ECMA_FLAGS[flag] for flag in flags)
    return '/{value}/{flags}'.format(value=value, flags=ecma_flags)
def relation_to_intermediary(fk):
    """Transform an SQLAlchemy ForeignKey object to its intermediary
    representation."""
    referencing_table = format_name(fk.parent.table.fullname)
    referenced_column = format_name(fk._column_tokens[1])
    return Relation(
        right_col=referencing_table,
        left_col=referenced_column,
        right_cardinality='?',
        left_cardinality='*',
    )
async def get_or_create(cls, lang: str, *,
                        client_token: str = None,
                        mounts: Iterable[str] = None,
                        envs: Mapping[str, str] = None,
                        resources: Mapping[str, int] = None,
                        cluster_size: int = 1,
                        tag: str = None,
                        owner_access_key: str = None) -> 'Kernel':
    '''Get-or-creates a compute session.

    If *client_token* is ``None``, it creates a new compute session as long as
    the server has enough resources and your API key has remaining quota.
    If *client_token* is a valid string and there is an existing compute session
    with the same token and the same *lang*, then it returns the :class:`Kernel`
    instance representing the existing session.

    :param lang: The image name and tag for the compute session.
        Example: ``python:3.6-ubuntu``.
    :param client_token: A client-side identifier to seamlessly reuse the
        compute session already created.
    :param mounts: The list of vfolder names that belongs to the current API
        access key.
    :param envs: The environment variables which always bypasses the jail
        policy.
    :param resources: The resource specification.
    :param cluster_size: The number of containers in this compute session.
        Must be at least 1.
    :param tag: An optional string to annotate extra information.
    :param owner_access_key: An optional access key that owns the created
        session. (Only available to administrators)
    :returns: The :class:`Kernel` instance.
    '''
    if client_token:
        # Server-side session reuse needs a token of sane length.
        assert 4 <= len(client_token) <= 64, \
            'Client session token should be 4 to 64 characters long.'
    else:
        # No token given: generate a random one, forcing a fresh session.
        client_token = uuid.uuid4().hex
    if mounts is None:
        mounts = []
    if resources is None:
        resources = {}
    # Client-configured vfolder mounts are always added on top of the
    # caller-requested ones.
    # NOTE(review): this extends a caller-provided list in place.
    mounts.extend(cls.session.config.vfolder_mounts)
    rqst = Request(cls.session, 'POST', '/kernel/create')
    rqst.set_json({
        'lang': lang,
        'tag': tag,
        'clientSessionToken': client_token,
        'config': {
            'mounts': mounts,
            'environ': envs,
            'clusterSize': cluster_size,
            'resources': resources,
        },
    })
    async with rqst.fetch() as resp:
        data = await resp.json()
        o = cls(data['kernelId'], owner_access_key)  # type: ignore
        # 'created' is False when an existing session was reused.
        o.created = data.get('created', True)  # True is for legacy
        o.service_ports = data.get('servicePorts', [])
        return o
def _cursor ( self ) :
"""Asserts that the connection is open and returns a cursor""" | if self . _conn is None :
self . _conn = sqlite3 . connect ( self . filename , check_same_thread = False )
return self . _conn . cursor ( ) |
def on_message(self, event):
    '''Runs when a message event is received

    Args:
        event: RTM API event.

    Returns:
        Legobot.messge
    '''
    meta = self._parse_metadata(event)
    msg = Message(text=meta['text'], metadata=meta).__dict__
    text = msg.get('text')
    if text:
        # Resolve raw user and channel ID references to readable names.
        text = self.find_and_replace_userids(text)
        msg['text'] = self.find_and_replace_channel_refs(text)
    return msg
def tokenize_akkadian_signs(word):
    """Takes tuple (word, language) and splits the word up into individual
    sign tuples (sign, function or language) in a list.

    input: ("{gisz}isz-pur-ram", "akkadian")
    output: [("gisz", "determinative"), ("isz", "akkadian"),
             ("pur", "akkadian"), ("ram", "akkadian")]

    :param word: tuple created by word_tokenizer2
    :return: list of tuples: (sign, function or language)
    """
    # Refactored: the original had two near-identical branches for the
    # 'akkadian' and 'sumerian' contexts; they are merged here.
    word_signs = []
    sign = ''
    language = word[1]
    in_determinative = False

    def _flush(label):
        # Emit the accumulated sign (if any) under the given label.
        nonlocal sign
        if sign:
            word_signs.append((sign, label))
        sign = ''

    for char in word[0]:
        if in_determinative:
            # Inside {...}: collect characters until the closing brace.
            if char == '}':
                _flush('determinative')
                in_determinative = False
                language = word[1]
            else:
                sign += char
        elif language not in ('akkadian', 'sumerian'):
            # Mirrors the original behaviour: characters in any other
            # language context are silently dropped.
            continue
        elif char == '{':
            _flush(language)
            in_determinative = True
        elif char == '_':
            # '_' switches Akkadian -> Sumerian; in Sumerian it resets to
            # the word's base language.
            _flush(language)
            language = 'sumerian' if language == 'akkadian' else word[1]
        elif char == '-':
            # '-' separates signs; reset to the word's base language.
            _flush(language)
            language = word[1]
        else:
            sign += char
    _flush(language)
    return word_signs
def get_credentials(self):
    """Return a set of credentials that may be used to access the Upload Area folder in the S3 bucket

    :return: a dict containing AWS credentials in a format suitable for passing to Boto3
        or if capitalized, used as environment variables
    """
    creds = CredentialsManager(self).get_credentials_from_upload_api()
    return {
        'aws_access_key_id': creds['access_key'],
        'aws_secret_access_key': creds['secret_key'],
        'aws_session_token': creds['token'],
        'expiry_time': creds['expiry_time'],
    }
def dissolve(inlist):
    """list and tuple flattening

    Parameters
    ----------
    inlist : list
        the list with sub-lists or tuples to be flattened

    Returns
    -------
    list
        the flattened result

    Examples
    --------
    >>> dissolve([[1, 2], [3, 4]])
    [1, 2, 3, 4]
    >>> dissolve([(1, 2, (3, 4)), [5, (6, 7)]])
    [1, 2, 3, 4, 5, 6, 7]
    """
    # The original used a conditional expression purely for its side
    # effects (extend/append); rewritten as an explicit branch.
    out = []
    for item in inlist:
        if isinstance(item, tuple):
            item = list(item)
        if isinstance(item, list):
            out.extend(dissolve(item))
        else:
            out.append(item)
    return out
def get_handler(self, handler_input, exception):
    # type: (Input, Exception) -> Union[AbstractExceptionHandler, None]
    """Get the exception handler that can handle the input and exception.

    :param handler_input: Generic input passed to the dispatcher.
    :type handler_input: Input
    :param exception: Exception thrown by
        :py:class:`ask_sdk_runtime.dispatch.GenericRequestDispatcher`
        dispatch method.
    :type exception: Exception
    :return: Exception Handler that can handle the input or None.
    :rtype: Union[None, ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler]
    """
    # First registered handler that accepts this input/exception wins.
    return next(
        (handler for handler in self.exception_handlers
         if handler.can_handle(handler_input=handler_input, exception=exception)),
        None)
def get_html_tag_lang_params(index_page):
    """Parse lang and xml:lang parameters in the ``<html>`` tag.

    See
    https://www.w3.org/International/questions/qa-html-language-declarations
    for details.

    Args:
        index_page (str): HTML content of the page you wisht to analyze.

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    dom = dhtmlparser.parseString(index_page)
    html_tags = dom.find("html")
    if not html_tags:
        return []
    # parse parameters
    params = html_tags[0].params
    lang = params.get("lang")
    xml_lang = params.get("xml:lang")
    # Both attributes agree: report a single combined source.
    if lang and lang == xml_lang:
        return [SourceString(lang, source="<html> tag")]
    sources = []
    if lang:
        sources.append(SourceString(lang, source="<html lang=..>"))
    if xml_lang:
        sources.append(SourceString(xml_lang, source="<html xml:lang=..>"))
    return sources
def removeSettingsGroup(groupName, settings=None):
    """Removes a group from the persistent settings"""
    logger.debug("Removing settings group: {}".format(groupName))
    if settings is None:
        settings = QtCore.QSettings()
    settings.remove(groupName)
def get_value_ddist(self, attr_name, attr_value):
    """Returns the class value probability distribution of the given
    attribute value."""
    assert not self.tree.data.is_continuous_class, \
        "Discrete distributions are only maintained for " + "discrete class types."
    dist = DDist()
    counts_for_value = self._attr_class_value_counts[attr_name][attr_value]
    for cls_value, cls_count in iteritems(counts_for_value):
        dist.add(cls_value, count=cls_count)
    return dist
def continuousSetsGenerator(self, request):
    """Returns a generator over the (continuousSet, nextPageToken) pairs
    defined by the specified request."""
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    num_sets = dataset.getNumContinuousSets()
    return self._topLevelObjectGenerator(
        request, num_sets, dataset.getContinuousSetByIndex)
def get_object(self):
    """Return the object of this TimeMachine"""
    model = self.content_type.model_class()
    return model.objects.get(uid=self.uid)
def _add_users(self, db, mongo_version):
    """Add given user, and extra x509 user if necessary.

    :param db: database handle on which users are created.
    :param mongo_version: server version tuple; used to pick an auth
        mechanism older pymongo clients can still use.
    """
    if self.x509_extra_user:
        # Build dict of kwargs to pass to add_user.
        auth_dict = {
            'name': DEFAULT_SUBJECT,
            'roles': self._user_roles(db.client)}
        db.add_user(**auth_dict)
        # Fix kwargs to MongoClient.
        self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT
    # Add secondary user given from request.
    secondary_login = {
        'name': self.login,
        'roles': self._user_roles(db.client)}
    if self.password:
        secondary_login['password'] = self.password
    if mongo_version >= (3, 7, 2):
        # Use SCRAM_SHA-1 so that pymongo < 3.7 can authenticate.
        secondary_login['mechanisms'] = ['SCRAM-SHA-1']
    db.add_user(**secondary_login)
def provision(self, conf):
    """Provision this metaconfig's config with what we gathered.

    Since Config has native support for ini files, we just need to
    let this metaconfig's config know about the ini file we found.

    In future scenarios, this is where we would implement logic
    specific to a metaconfig source if that source is not natively
    supported by Config.
    """
    ini_path = self.ini
    if not ini_path:
        return
    # Highest-priority position, but never register the same file twice.
    if ini_path not in conf._ini_paths:
        conf._ini_paths.insert(0, ini_path)
def get_document_summary(self, N=None, cite_sort=True, refresh=True):
    """Return a summary string of documents.

    Parameters
    ----------
    N : int or None (optional, default=None)
        Maximum number of documents to include in the summary.
        If None, return all documents.
    cite_sort : bool (optional, default=True)
        Whether to sort xml by number of citations, in decreasing order,
        or not.
    refresh : bool (optional, default=True)
        Whether to refresh the cached abstract file (if it exists) or not.

    Returns
    -------
    s : str
        Text summarizing an author's documents.
    """
    abstracts = self.get_abstracts(refresh=refresh)
    if cite_sort:
        # Stable sort by citation count, most cited first.
        decorated = [(abstract, int(abstract.citedby_count))
                     for abstract in abstracts]
        decorated.sort(key=itemgetter(1), reverse=True)
        abstracts = [abstract for abstract, _ in decorated]
    if N is None:
        N = len(abstracts)
    lines = [u'{0} of {1} documents'.format(N, len(abstracts))]
    for idx in range(N):
        lines.append('{0:2d}. {1}\n'.format(idx + 1, str(abstracts[idx])))
    return '\n'.join(lines)
def read_hdf5_dict(h5f, names=None, group=None, **kwargs):
    """Read a `TimeSeriesDict` from HDF5"""
    # find group from which to read
    h5g = h5f[group] if group else h5f
    # find list of names to read
    if names is None:
        names = [key for key in h5g if _is_timeseries_dataset(h5g[key])]
    # read names
    dict_class = kwargs.pop('dict_type', TimeSeriesDict)
    out = dict_class()
    kwargs.setdefault('array_type', out.EntryClass)
    for name in names:
        out[name] = read_hdf5_timeseries(h5g[name], **kwargs)
    return out
def softplus(attrs, inputs, proto_obj):
    """Applies the softplus activation function element-wise to the input."""
    # MXNet's 'softrelu' activation is the softplus function.
    updated_attrs = translation_utils._add_extra_attributes(
        attrs, {'act_type': 'softrelu'})
    return 'Activation', updated_attrs, inputs
def _check_channel_state_for_update(
        self,
        channel_identifier: ChannelID,
        closer: Address,
        update_nonce: Nonce,
        block_identifier: BlockSpecification,
) -> Optional[str]:
    """Check the channel state on chain to see if it has been updated.

    Compare the nonce, we are about to update the contract with, with the
    updated nonce in the onchain state and, if it's the same, return a
    message with which the caller should raise a RaidenRecoverableError.

    If all is okay return None.
    """
    closer_details = self._detail_participant(
        channel_identifier=channel_identifier,
        participant=closer,
        partner=self.node_address,
        block_identifier=block_identifier,
    )
    # Same nonce on chain means our update was already applied.
    if closer_details.nonce == update_nonce:
        return (
            'updateNonClosingBalanceProof transaction has already '
            'been mined and updated the channel succesfully.'
        )
    return None
def get_file(self, name, filename):
    """Saves the content of file named ``name`` to ``filename``.

    Works like :meth:`get_stream`, but ``filename`` is the name of
    a file which will be created (or overwritten).

    Returns the full versioned name of the retrieved file.
    """
    stream, vname = self.get_stream(name)
    # Removed a dead `path, version = split_name(vname)` call whose
    # results were never used.
    # Ensure the target directory exists before writing.
    dir_path = os.path.dirname(filename)
    if dir_path:
        mkdir(dir_path)
    with open(filename, 'wb') as f:
        shutil.copyfileobj(stream, f)
    return vname
def clustering_coef_wu(W):
    '''The weighted clustering coefficient is the average "intensity" of
    triangles around a node.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted undirected connection matrix

    Returns
    -------
    C : Nx1 np.ndarray
        clustering coefficient vector
    '''
    # Node degree: number of nonzero connections in each row.
    K = np.array(np.sum(np.logical_not(W == 0), axis=1), dtype=float)
    # Element-wise cube root of the weights (cuberoot is presumably a
    # sign-safe helper -- confirm), so each triangle's product of three
    # rooted weights stays on the original weight scale.
    ws = cuberoot(W)
    # diag(ws^3) gives the weighted count of 3-cycles through each node.
    cyc3 = np.diag(np.dot(ws, np.dot(ws, ws)))
    # Nodes with no triangles would give 0/0 below; setting K to inf
    # makes their coefficient 0 instead.
    K[np.where(cyc3 == 0)] = np.inf
    # if no 3-cycles exist, set C=0
    C = cyc3 / (K * (K - 1))
    return C
def _get_pdi ( cls , df , windows ) :
"""+ DI , positive directional moving index
: param df : data
: param windows : range
: return :""" | window = cls . get_only_one_positive_int ( windows )
pdm_column = 'pdm_{}' . format ( window )
tr_column = 'atr_{}' . format ( window )
pdi_column = 'pdi_{}' . format ( window )
df [ pdi_column ] = df [ pdm_column ] / df [ tr_column ] * 100
return df [ pdi_column ] |
def computePCsPlink(plink_path, k, out_dir, bfile, ffile):
    """computing the covariance matrix via plink

    Runs the external ``plink`` binary to compute the top ``k`` principal
    components for the genotype fileset ``bfile``, then standardises each
    component (zero mean, unit variance) and writes the matrix to ``ffile``.

    :param plink_path: path to the plink executable
    :param k: number of principal components to compute
    :param out_dir: directory where plink writes its output files
    :param bfile: plink binary fileset prefix
    :param ffile: destination file for the standardised eigenvectors
    """
    print("Using plink to compute principal components")
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if any path is attacker-controlled; an argument list with
    # shell=False would be safer.
    cmd = '%s --bfile %s --pca %d ' % (plink_path, bfile, k)
    cmd += '--out %s' % (os.path.join(out_dir, 'plink'))
    subprocess.call(cmd, shell=True)
    plink_fn = os.path.join(out_dir, 'plink.eigenvec')
    # First two columns of .eigenvec are sample identifiers (presumably
    # FID/IID -- confirm against plink docs); eigenvectors start at col 2.
    M = sp.loadtxt(plink_fn, dtype=str)
    U = sp.array(M[:, 2:], dtype=float)
    # Standardise each component to zero mean and unit variance.
    U -= U.mean(0)
    U /= U.std(0)
    sp.savetxt(ffile, U)
def _upload_resumable_all(self, upload_info, bitmap, number_of_units, unit_size):
    """Prepare and upload all resumable units and return upload_key

    upload_info -- UploadInfo object
    bitmap -- bitmap node of upload/check
    number_of_units -- number of units requested
    unit_size -- size of a single upload unit in bytes
    """
    fd = upload_info.fd
    upload_key = None
    # The bitmap does not change while we iterate, so decode it once
    # here instead of once per unit (it was recomputed inside the loop).
    upload_status = decode_resumable_upload_bitmap(bitmap, number_of_units)
    for unit_id in range(number_of_units):
        if upload_status[unit_id]:
            logger.debug("Skipping unit %d/%d - already uploaded",
                         unit_id + 1, number_of_units)
            continue
        logger.debug("Uploading unit %d/%d", unit_id + 1, number_of_units)
        offset = unit_id * unit_size
        # SubsetIO exposes just this unit's byte range of the source file.
        with SubsetIO(fd, offset, unit_size) as unit_fd:
            unit_info = _UploadUnitInfo(
                upload_info=upload_info,
                hash_=upload_info.hash_info.units[unit_id],
                fd=unit_fd,
                uid=unit_id)
            upload_result = self._upload_resumable_unit(unit_info)
        # upload_key is needed for polling
        if upload_key is None:
            upload_key = upload_result['doupload']['key']
    return upload_key
def confirm(self, batch_id=None, filename=None):
    """Flags the batch as confirmed by updating
    confirmation_datetime on the history model for this batch.

    :param batch_id: restrict confirmation to this batch (optional).
    :param filename: restrict confirmation to this file (optional).
        With neither given, every sent-but-unconfirmed row is confirmed.
    :returns: the confirmation code identifier applied to the rows.
    :raises ConfirmationError: if no sent, unconfirmed rows exist.
    """
    if batch_id or filename:
        # Narrow to the requested batch/file (OR'd together).
        export_history = self.history_model.objects.using(self.using).filter(
            Q(batch_id=batch_id) | Q(filename=filename),
            sent=True,
            confirmation_code__isnull=True,
        )
    else:
        export_history = self.history_model.objects.using(self.using).filter(
            sent=True, confirmation_code__isnull=True)
    if export_history.count() == 0:
        raise ConfirmationError(
            "Nothing to do. No history of sent and unconfirmed files")
    else:
        # All rows confirmed together share one confirmation code.
        confirmation_code = ConfirmationCode()
        export_history.update(
            confirmation_code=confirmation_code.identifier,
            confirmation_datetime=get_utcnow(),
        )
        return confirmation_code.identifier
def get_content(request, page_id, content_id):
    """Get the content for a particular page"""
    body = Content.objects.get(pk=content_id).body
    return HttpResponse(body)
def item_enclosure_length(self, item):
    """Try to obtain the size of the enclosure if it's present on the FS,
    otherwise returns an hardcoded value.

    Note: this method is only called if item_enclosure_url
    has returned something.
    """
    try:
        return str(item.image.size)
    except (AttributeError, ValueError, os.error):
        # Image missing or unreadable: fall back to a fixed size.
        return '100000'
def _extended_lookup(datastore_api, project, key_pbs,
                     missing=None, deferred=None,
                     eventual=False, transaction_id=None):
    """Repeat lookup until all keys found (unless stop requested).

    Helper function for :meth:`Client.get_multi`.

    :type datastore_api: :class:`google.cloud.datastore._http.HTTPDatastoreAPI`
        or :class:`google.cloud.datastore_v1.gapic.DatastoreClient`
    :param datastore_api: The datastore API object used to connect
        to datastore.

    :type project: str
    :param project: The project to make the request for.

    :type key_pbs: list of :class:`.entity_pb2.Key`
    :param key_pbs: The keys to retrieve from the datastore.

    :type missing: list
    :param missing: (Optional) If a list is passed, the key-only entity
        protobufs returned by the backend as "missing" will be
        copied into it.

    :type deferred: list
    :param deferred: (Optional) If a list is passed, the key protobufs returned
        by the backend as "deferred" will be copied into it.

    :type eventual: bool
    :param eventual: If False (the default), request ``STRONG`` read
        consistency. If True, request ``EVENTUAL`` read
        consistency.

    :type transaction_id: str
    :param transaction_id: If passed, make the request in the scope of
        the given transaction. Incompatible with
        ``eventual==True``.

    :rtype: list of :class:`.entity_pb2.Entity`
    :returns: The requested entities.
    :raises: :class:`ValueError` if missing / deferred are not null or
        empty list.
    """
    # Caller-supplied accumulators must start empty; we only ever append.
    if missing is not None and missing != []:
        raise ValueError("missing must be None or an empty list")
    if deferred is not None and deferred != []:
        raise ValueError("deferred must be None or an empty list")
    results = []
    loop_num = 0
    read_options = helpers.get_read_options(eventual, transaction_id)
    # _MAX_LOOPS bounds the retries in case the backend keeps deferring.
    while loop_num < _MAX_LOOPS:  # loop against possible deferred.
        loop_num += 1
        lookup_response = datastore_api.lookup(
            project, key_pbs, read_options=read_options)
        # Accumulate the new results.
        results.extend(result.entity for result in lookup_response.found)
        if missing is not None:
            missing.extend(result.entity for result in lookup_response.missing)
        if deferred is not None:
            # Caller asked to be told about deferred keys: record them and
            # stop immediately instead of retrying (hence the early break).
            deferred.extend(lookup_response.deferred)
            break
        if len(lookup_response.deferred) == 0:
            break
        # We have deferred keys, and the user didn't ask to know about
        # them, so retry (but only with the deferred ones).
        key_pbs = lookup_response.deferred
    return results
def recv(request_context=None, non_blocking=False):
    """Receives data from websocket.

    :param request_context:
    :param bool non_blocking:
    :rtype: bytes|str
    :raises IOError: If unable to receive a message.
    """
    receiver = uwsgi.websocket_recv_nb if non_blocking else uwsgi.websocket_recv
    return receiver(request_context)
def _distance_covariance_sqr_naive(x, y, exponent=1):
    """Naive biased estimator for distance covariance.

    Computes the biased (V-statistic) estimator for distance covariance
    between two matrices, using an :math:`O(N^2)` algorithm.
    (The original docstring said "unbiased", contradicting the summary
    line and the function name.)
    """
    # Pairwise distance matrices of each sample, raised to ``exponent``
    # (presumably double-centered inside _distance_matrix -- confirm).
    a = _distance_matrix(x, exponent=exponent)
    b = _distance_matrix(y, exponent=exponent)
    # Mean of the element-wise product of the two matrices.
    return mean_product(a, b)
def relaxNGValidateFullElement(self, doc, elem):
    """Validate a full subtree when
    xmlRelaxNGValidatePushElement() returned 0 and the content
    of the node has been expanded."""
    # Unwrap the Python wrappers to their underlying libxml2 objects.
    doc__o = None if doc is None else doc._o
    elem__o = None if elem is None else elem._o
    return libxml2mod.xmlRelaxNGValidateFullElement(self._o, doc__o, elem__o)
def export(self, nidm_version, export_dir):
    """Create prov entities and activities."""
    # Tuple comparison is equivalent to the original major/minor checks
    # for integer version components.
    version = (nidm_version['major'], nidm_version['minor'])
    if version < (1, 3):
        self.type = NIDM_DATA_SCALING
    # Create "Data" entity
    # FIXME: grand mean scaling?
    # FIXME: medianIntensity
    self.add_attributes((
        (PROV['type'], self.type),
        (PROV['type'], PROV['Collection']),
        (PROV['label'], self.label),
        (NIDM_GRAND_MEAN_SCALING, self.grand_mean_sc),
        (NIDM_TARGET_INTENSITY, self.target_intensity)))
    if version > (1, 2):
        if self.mri_protocol is not None:
            self.add_attributes([(NIDM_HAS_MRI_PROTOCOL, self.mri_protocol)])
def remove(self, force=False):
    """Remove this volume.

    Args:
        force (bool): Force removal of volumes that were already removed
            out of band by the volume driver plugin.

    Raises:
        :py:class:`docker.errors.APIError`
            If volume failed to remove.
    """
    api = self.client.api
    return api.remove_volume(self.id, force=force)
def _split_line ( s , parts ) :
"""Parameters
s : string
Fixed - length string to split
parts : list of ( name , length ) pairs
Used to break up string , name ' _ ' will be filtered from output .
Returns
Dict of name : contents of string at given location .""" | out = { }
start = 0
for name , length in parts :
out [ name ] = s [ start : start + length ] . strip ( )
start += length
del out [ '_' ]
return out |
def announce(version):
    """Generates a new release announcement entry in the docs.

    Renders the minor/patch release template with *version* and the git
    authors since the last tag, writes it under doc/en/announce/, links it
    from the announce index, and stages the new file with ``git add``.
    """
    # Get our list of authors
    stdout = check_output(["git", "describe", "--abbrev=0", "--tags"])
    stdout = stdout.decode("utf-8")
    last_version = stdout.strip()
    # De-duplicated set of everyone who committed since the previous tag.
    stdout = check_output(
        ["git", "log", "{}..HEAD".format(last_version), "--format=%aN"])
    stdout = stdout.decode("utf-8")
    contributors = set(stdout.splitlines())
    # 'X.Y.0' versions get the minor-release template, others the patch one.
    template_name = (
        "release.minor.rst" if version.endswith(".0") else "release.patch.rst")
    template_text = (
        Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8"))
    contributors_text = (
        "\n".join("* {}".format(name) for name in sorted(contributors)) + "\n")
    text = template_text.format(version=version, contributors=contributors_text)
    target = Path(__file__).parent.joinpath(
        "../doc/en/announce/release-{}.rst".format(version))
    target.write_text(text, encoding="UTF-8")
    print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}")
    # Update index with the new release entry
    index_path = Path(__file__).parent.joinpath("../doc/en/announce/index.rst")
    lines = index_path.read_text(encoding="UTF-8").splitlines()
    indent = " "
    # Insert just before the first existing release-* entry so the list
    # stays newest-first; skip if this release is already listed.
    for index, line in enumerate(lines):
        if line.startswith("{}release-".format(indent)):
            new_line = indent + target.stem
            if line != new_line:
                lines.insert(index, new_line)
                index_path.write_text("\n".join(lines) + "\n", encoding="UTF-8")
                print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Updated {index_path.name}")
            else:
                print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Skip {index_path.name} (already contains release)")
            break
    check_call(["git", "add", str(target)])
def hour(self, value=None):
    """Corresponds to IDD Field `hour`

    Args:
        value (int): value for IDD Field `hour`
            value >= 1
            value <= 24
            if `value` is None it will not be checked against the
            specification and is assumed to be a missing value

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is None:
        # Missing value: stored unchecked.
        self._hour = None
        return
    try:
        checked = int(value)
    except ValueError:
        raise ValueError('value {} need to be of type int '
                         'for field `hour`'.format(value))
    if checked < 1:
        raise ValueError('value need to be greater or equal 1 '
                         'for field `hour`')
    if checked > 24:
        raise ValueError('value need to be smaller 24 '
                         'for field `hour`')
    self._hour = checked
def get_users(self):
    """Get number of users."""
    query = Token.query.filter_by(
        client_id=self.client_id, is_personal=False, is_internal=False)
    return query.count()
def dest_fpath(self, source_fpath: str) -> str:
    """Calculates full path for end json-api file from source file full
    path."""
    # Drop the leading path component, keep the rest relative.
    path_parts = source_fpath.split(os.sep)
    relative_fpath = os.path.join(*path_parts[1:])
    relative_dirpath = os.path.dirname(relative_fpath)
    # Base name up to the first dot, with the .json extension.
    base_fname = path_parts[-1].split('.')[0]
    return os.path.join(self.dest_dir, relative_dirpath, f'{base_fname}.json')
def clean(self, *args, **kwargs):
    """Call self.synchronizer.clean method"""
    if self.synchronizer_path == 'None' or not self.config:
        return
    # call synchronizer custom clean
    try:
        self.synchronizer.load_config(self.config)
        self.synchronizer.clean()
    except ImproperlyConfigured as e:
        raise ValidationError(e.message)
def pid(self):
    """Get PID object for the Release record."""
    # Only published releases with a record have a PID.
    if self.model.status != ReleaseStatus.PUBLISHED or not self.record:
        return None
    fetcher = current_pidstore.fetchers[
        current_app.config.get('GITHUB_PID_FETCHER')]
    return fetcher(self.record.id, self.record)
def pause(self, unique_id, configs=None):
    """Issues a sigstop for the specified process

    :Parameter unique_id: the name of the process
    :Parameter configs: optional config overrides passed to get_pid
    """
    pids = self.get_pid(unique_id, configs)
    if pids != constants.PROCESS_NOT_RUNNING_PID:
        # A process may have several PIDs; signal all of them in one kill.
        pid_str = ' '.join(str(pid) for pid in pids)
        hostname = self.processes[unique_id].hostname
        with get_ssh_client(hostname, username=runtime.get_username(),
                            password=runtime.get_password()) as ssh:
            # SIGSTOP suspends the processes until a SIGCONT resumes them.
            better_exec_command(ssh, "kill -SIGSTOP {0}".format(pid_str),
                                "PAUSING PROCESS {0}".format(unique_id))
def check_type_compatibility(type_1_id, type_2_id):
    """When applying a type to a resource, it may be the case that the resource
    already has an attribute specified in the new type, but the template which
    defines this pre-existing attribute has a different unit specification to
    the new template.

    This function checks for any situations where different types specify the
    same attributes, but with different units.

    :param type_1_id: ID of the first TemplateType.
    :param type_2_id: ID of the second TemplateType.
    :returns: list of human-readable error strings (empty when compatible).
    """
    errors = []
    type_1 = db.DBSession.query(TemplateType).filter(
        TemplateType.id == type_1_id).options(
        joinedload_all('typeattrs')).one()
    type_2 = db.DBSession.query(TemplateType).filter(
        TemplateType.id == type_2_id).options(
        joinedload_all('typeattrs')).one()
    template_1_name = type_1.template.name
    template_2_name = type_2.template.name
    # Only attributes present in BOTH types can conflict.
    type_1_attrs = set(t.attr_id for t in type_1.typeattrs)
    type_2_attrs = set(t.attr_id for t in type_2.typeattrs)
    shared_attrs = type_1_attrs.intersection(type_2_attrs)
    if len(shared_attrs) == 0:
        return []
    # Index type 1's shared typeattrs by attribute for lookup below.
    type_1_dict = {t.attr_id: t for t in type_1.typeattrs
                   if t.attr_id in shared_attrs}
    for ta in type_2.typeattrs:
        # BUGFIX: skip attributes not shared with type 1; the original
        # raised KeyError on type_1_dict for type-2-only attributes.
        if ta.attr_id not in shared_attrs:
            continue
        type_2_unit_id = ta.unit_id
        type_1_unit_id = type_1_dict[ta.attr_id].unit_id
        fmt_dict = {
            'template_1_name': template_1_name,
            'template_2_name': template_2_name,
            'attr_name': ta.attr.name,
            'type_1_unit_id': type_1_unit_id,
            'type_2_unit_id': type_2_unit_id,
            'type_name': type_1.name,
        }
        if type_1_unit_id is None and type_2_unit_id is not None:
            # BUGFIX: added the missing space between 'template' and the
            # template name in the message below.
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s with no units, while template"
                          " %(template_2_name)s stores it with unit %(type_2_unit_id)s"
                          % fmt_dict)
        elif type_1_unit_id is not None and type_2_unit_id is None:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s."
                          " Template %(template_2_name)s stores it with no unit."
                          % fmt_dict)
        elif type_1_unit_id != type_2_unit_id:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s, while"
                          " template %(template_2_name)s stores it in %(type_2_unit_id)s"
                          % fmt_dict)
    return errors
def plot(data, headers=None, pconfig=None):
    """Helper HTML for a beeswarm plot.

    :param data: A list of data dicts
    :param headers: A list of Dicts/OrderedDicts with information
        for the series, such as colour scales, min and
        max values etc.
    :return: HTML string
    """
    headers = [] if headers is None else headers
    pconfig = {} if pconfig is None else pconfig
    # Allow user to overwrite any given config for this plot
    plot_id = pconfig.get('id')
    if plot_id and plot_id in config.custom_plot_config:
        pconfig.update(config.custom_plot_config[plot_id])
    # Make a datatable object
    dt = table_object.datatable(data, headers, pconfig)
    return make_plot(dt)
def get(self, session, fields=[], **kwargs):
    '''taobao.items.get - search for items.

    Fetches a list of items matching the given search criteria (similar to
    the item search on the Taobao site, but returns only the matched item
    list without the ItemCategory list). Only partial item information is
    returned; use taobao.item.get for full item details. Supplying only
    ``fields`` with no other criteria is rejected by the API as an
    under-specified search. ``cid=0`` cannot be used as a query.
    (Docstring translated from the original Chinese.)
    '''
    request = TOPRequest('taobao.items.get')
    # Default to the full set of Item fields when none are requested.
    if not fields:
        item = Item()
        fields = item.fields
    request['fields'] = fields
    # NOTE(review): Python 2 code (dict.iteritems, `v == None`).
    for k, v in kwargs.iteritems():
        # Skip None values for keys outside the known search parameters.
        if k not in ('q', 'nicks', 'cid', 'props', 'product_id', 'page_no',
                     'order_by', 'ww_status', 'post_free', 'location_state',
                     'location_city', 'is_3D', 'start_score', 'end_score',
                     'start_volume', 'end_volume', 'one_station', 'is_cod',
                     'is_mall', 'is_prepay', 'genuine_security',
                     'stuff_status', 'start_price', 'end_price', 'page_size',
                     'promoted_service', 'is_xinpin') and v == None:
            continue
        # API expects dotted parameter names for the location filters.
        if k == 'location_state':
            k = 'location.state'
        if k == 'location_city':
            k = 'location.city'
        request[k] = v
    self.create(self.execute(request, session))
    return self.items
def delete(context, force, yes, analysis_id):
    """Delete an analysis log from the database."""
    store = context.obj['store']
    analysis_obj = store.analysis(analysis_id)
    if analysis_obj is None:
        print(click.style('analysis log not found', fg='red'))
        context.abort()
    print(click.style(f"{analysis_obj.family}: {analysis_obj.status}"))

    if analysis_obj.is_temp:
        # Temporary analyses only carry a log entry: just drop the record.
        if yes or click.confirm("remove analysis log?"):
            analysis_obj.delete()
            store.commit()
            print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
        return

    if analysis_obj.is_deleted:
        print(click.style(f"{analysis_obj.family}: already deleted", fg='red'))
        context.abort()
    if not Path(analysis_obj.out_dir).exists():
        print(click.style(f"analysis output doesn't exist: {analysis_obj.out_dir}", fg='red'))
        context.abort()

    root_dir = store.families_dir
    family_dir = analysis_obj.out_dir
    # Refuse to remove anything outside the managed families directory
    # unless the user explicitly forces it.
    looks_unmanaged = len(family_dir) <= len(root_dir) or root_dir not in family_dir
    if not force and looks_unmanaged:
        print(click.style(f"unknown analysis output dir: {analysis_obj.out_dir}", fg='red'))
        print(click.style("use '--force' to override"))
        context.abort()
    if yes or click.confirm(f"remove analysis output: {analysis_obj.out_dir}?"):
        shutil.rmtree(analysis_obj.out_dir, ignore_errors=True)
        analysis_obj.is_deleted = True
        store.commit()
        print(click.style(f"analysis deleted: {analysis_obj.family}", fg='blue'))
def decline(self, lemma, flatten=False, collatinus_dict=False):
    """Decline a lemma.

    .. warning:: POS are incomplete as we do not detect the type outside
        of verbs, participles and adjectives.

    :raise UnknownLemma: When the lemma is unknown to our data
    :param lemma: Lemma (canonical form) to decline
    :type lemma: str
    :param flatten: If True, returns a flat list of forms without
        morphological information about them
    :type flatten: bool
    :param collatinus_dict: If True, returns a dictionary of grammatically
        valid forms, including variants, keyed by morpho-slot number
    :type collatinus_dict: bool
    :return: List of tuples where the first value is the form and the
        second the POS, ie [("sum", "v1ppip---")]
    :rtype: list or dict
    """
    if lemma not in self.__lemmas__:
        raise UnknownLemma("%s is unknown" % lemma)
    # Look up the lemma entry and the inflection model it follows.
    lemma_entry = self.__lemmas__[lemma]
    model = self.__models__[lemma_entry["model"]]
    # Get the stems (roots) this lemma inflects from.
    roots = self.__getRoots(lemma, model=model)
    # Desinence table keys are numeric morpho slots; sort for stable output.
    keys = sorted([int(key) for key in model["des"].keys()])
    forms_data = [(key, model["des"][str(key)]) for key in keys]
    # Generate every root+ending combination for each morpho slot.
    forms = {key: [] for key in keys}
    for key, form_list in forms_data:
        for form in form_list:
            root_id, endings = tuple(form)
            for root in roots[root_id]:
                for ending in endings:
                    forms[key].append(root + ending)
    # "sufd" means we keep the original forms of the parent model but
    # append a constant suffix to every one of them.
    if len(model["sufd"]):
        # For each constant form
        for key, iter_forms in forms.items():
            new_forms = []
            # We add the constant suffix
            for sufd in model["sufd"]:
                new_forms += [form + sufd for form in iter_forms]
            forms[key] = new_forms
    # "suf": per-slot suffixed variants added alongside the base forms.
    # We snapshot the current forms first so suffixes combine with the
    # originals, not with each other.
    if len(model["suf"]):
        cached_forms = {k: v + [] for k, v in forms.items()}
        # Making cache without using copy
        # For each suffix
        # The format is [suffix characters, [modified forms]]
        for suffixes in model["suf"]:
            suffix, modified_forms = suffixes[0], suffixes[1]
            for modified_form in modified_forms:
                forms[modified_form] += [f + suffix for f in cached_forms[modified_form]]
    # "abs": slots the model declares non-existent are removed pre-emptively.
    if len(model["abs"]):
        for abs_form in model["abs"]:
            if abs_form in forms:
                del forms[abs_form]
    if flatten:
        return list([form for case_forms in forms.values() for form in case_forms])
    elif collatinus_dict:
        return forms
    else:
        return list([(form, self.__getPOS(key)) for key, case_forms in forms.items() for form in case_forms])
def get_direct_band_gap_dict(self):
    """Return information about the direct band gap, per spin channel.

    Returns:
        dict: keyed by spin; each value is a dict with the gap ``value``,
        the ``kpoint_index`` where the direct gap occurs, and the
        ``band_indices`` [valence, conduction] of the bands involved.

    Raises:
        ValueError: if the band structure is metallic (no gap exists).
    """
    if self.is_metal():
        # Fixed: the original concatenated literals were missing a space
        # ("shouldonly be used").
        raise ValueError("get_direct_band_gap_dict should "
                         "only be used with non-metals")
    direct_gap_dict = {}
    for spin, v in self.bands.items():
        # Bands lying entirely above / below the Fermi level, per k-point.
        above = v[np.all(v > self.efermi, axis=1)]
        min_above = np.min(above, axis=0)
        below = v[np.all(v < self.efermi, axis=1)]
        max_below = np.max(below, axis=0)
        # Direct gap: smallest conduction-valence separation at one k-point.
        diff = min_above - max_below
        kpoint_index = np.argmin(diff)
        band_indices = [np.argmax(below[:, kpoint_index]),
                        np.argmin(above[:, kpoint_index]) + len(below)]
        direct_gap_dict[spin] = {"value": diff[kpoint_index],
                                 "kpoint_index": kpoint_index,
                                 "band_indices": band_indices}
    return direct_gap_dict
def _set_isns_vrf_instance(self, v, load=False):
    """Setter method for isns_vrf_instance, mapped from YANG variable
    /isns/isns_vrf/isns_vrf_instance (isns-vrf-type).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_isns_vrf_instance is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_isns_vrf_instance() directly.

    YANG Description: This specifies iSNS VRF Instance.
    """
    # Key leaves may not be set directly once the list entry is instantiated.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
        raise AttributeError("Cannot set keys directly when" + " within an instantiated list")
    if hasattr(v, "_utype"):
        # Unwrap an already-typed value back to its base type before re-validating.
        v = v._utype(v)
    try:
        # Auto-generated pyangbind validation: coerce v into the restricted
        # uint32 type whose only allowed value range is '1'.
        t = YANGDynClass(v, base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1']}), is_leaf=True, yang_name="isns-vrf-instance", rest_name="isns-vrf-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'iSNS VRF forwarding value 1', u'cli-drop-node-name': None, u'hidden': u'isns-vrf-instance'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isns', defining_module='brocade-isns', yang_type='isns-vrf-type', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated, user-facing error payload.
        raise ValueError({'error-string': """isns_vrf_instance must be of a type compatible with isns-vrf-type""", 'defined-type': "brocade-isns:isns-vrf-type", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1']}), is_leaf=True, yang_name="isns-vrf-instance", rest_name="isns-vrf-instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'iSNS VRF forwarding value 1', u'cli-drop-node-name': None, u'hidden': u'isns-vrf-instance'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-isns', defining_module='brocade-isns', yang_type='isns-vrf-type', is_config=True)""", })
    self.__isns_vrf_instance = t
    if hasattr(self, '_set'):
        self._set()
def load_phonopy(filename, structure, dim, symprec=0.01, primitive_matrix=None, factor=VaspToTHz, symmetrise=True, born=None, write_fc=False):
    """Load phonopy output and return a ``phonopy.Phonopy`` object.

    Args:
        filename (str): Path to phonopy output. Can be any of ``FORCE_SETS``,
            ``FORCE_CONSTANTS``, or ``force_constants.hdf5``.
        structure (:obj:`~pymatgen.core.structure.Structure`): The unitcell
            structure.
        dim (list): The supercell size, as a :obj:`list` of :obj:`float`.
        symprec (:obj:`float`, optional): The tolerance for determining the
            crystal symmetry.
        primitive_matrix (:obj:`list`, optional): The transformation matrix
            from the conventional to primitive cell. Only required when the
            conventional cell was used as the starting structure. Should be
            provided as a 3x3 :obj:`list` of :obj:`float`.
        factor (:obj:`float`, optional): The conversion factor for phonon
            frequency. Defaults to :obj:`phonopy.units.VaspToTHz`.
        symmetrise (:obj:`bool`, optional): Symmetrise the force constants.
            Defaults to ``True``.
        born (:obj:`str`, optional): Path to file containing Born effective
            charges. Should be in the same format as the file produced by the
            ``phonopy-vasp-born`` script provided by phonopy.
        write_fc (:obj:`bool` or :obj:`str`, optional): Write the force
            constants to disk. If ``True``, a ``FORCE_CONSTANTS`` file will
            be written. Alternatively, if set to ``"hdf5"``, a
            ``force_constants.hdf5`` file will be written. Defaults to
            ``False`` (force constants not written).

    Returns:
        phonopy.Phonopy: The initialised phonopy object.
    """
    unitcell = get_phonopy_structure(structure)
    num_atom = unitcell.get_number_of_atoms()
    num_satom = determinant(dim) * num_atom
    phonon = Phonopy(unitcell, dim, primitive_matrix=primitive_matrix,
                     factor=factor, symprec=symprec)
    if 'FORCE_CONSTANTS' == filename or '.hdf5' in filename:
        # Force constants already exist: load them to avoid recalculating.
        if '.hdf5' in filename:
            fc = file_IO.read_force_constants_hdf5(filename)
        else:
            fc = file_IO.parse_FORCE_CONSTANTS(filename=filename)
        if fc.shape[0] != num_satom:
            # Fixed: the concatenated literals were missing a space
            # ("Pleasecarefully").
            msg = ("\nNumber of atoms in supercell is not consistent with the "
                   "matrix shape of\nforce constants read from {}.\nPlease "
                   "carefully check --dim.")
            logging.error(msg.format(filename))
            sys.exit()
        phonon.set_force_constants(fc)
    elif 'FORCE_SETS' == filename:
        # Load the force sets from file and calculate force constants.
        fs = file_IO.parse_FORCE_SETS()
        if fs['natom'] != num_satom:
            # Fixed: doubled "the " in the message; the string has no
            # placeholder, so the pointless .format(filename) was dropped.
            msg = ("\nNumber of atoms in supercell is not consistent with "
                   "the data in FORCE_SETS.\nPlease carefully check --dim.")
            logging.error(msg)
            sys.exit()
        phonon.set_displacement_dataset(fs)
        logging.info("Calculating force constants...")
        phonon.produce_force_constants()
    if born:
        # Load Born effective charges from file; the non-analytical-correction
        # unit conversion factor is set manually, specific to VASP.
        nac_params = file_IO.parse_BORN(unitcell, filename=born)
        nac_params['factor'] = Hartree * Bohr
        phonon.set_nac_params(nac_params)
    if symmetrise:
        phonon.symmetrize_force_constants()
    if write_fc == 'hdf5':
        file_IO.write_force_constants_to_hdf5(phonon.get_force_constants())
        logging.info("Force constants written to force_constants.hdf5.")
    elif write_fc:
        file_IO.write_FORCE_CONSTANTS(phonon.get_force_constants())
        logging.info("Force constants written to FORCE_CONSTANTS.")
    return phonon
def getObjectByPid(self, pid):
    """Look up the entry identified by ``pid``.

    Args:
        pid: str

    Returns:
        str: URIRef of the entry identified by ``pid``.
    """
    self._check_initialized()
    # Match on the dcterms:identifier literal and take the first subject.
    matches = [subject for subject in self.subjects(predicate=DCTERMS.identifier,
                                                    object=rdflib.term.Literal(pid))]
    return matches[0]
async def pack_message(wallet_handle: int, message: str, recipient_verkeys: list, sender_verkey: Optional[str]) -> bytes:
    """Packs a message by encrypting it and serialising it in a JWE-like
    format (Experimental).

    Note: to use DID keys with this function you can call did.key_for_did
    to get the key id (verkey) for a specific DID.

    :param wallet_handle: wallet handle (created by open_wallet)
    :param message: the message being sent as a string; JSON content should
        be converted to a string first
    :param recipient_verkeys: a list of strings which are recipient verkeys
    :param sender_verkey: the sender's verkey as a string; when None is
        passed, anoncrypt mode is used
    :return: an Agent Wire Message as a byte array (see HIPE 0028 for
        detailed formats)
    """
    logger = logging.getLogger(__name__)
    logger.debug("pack_message: >>> wallet_handle: %r, message: %r, recipient_verkeys: %r, sender_verkey: %r", wallet_handle, message, recipient_verkeys, sender_verkey)

    def transform_cb(arr_ptr: POINTER(c_uint8), arr_len: c_uint32):
        # Copy the C byte buffer into Python bytes before the buffer is freed.
        return bytes(arr_ptr[:arr_len]),

    if not hasattr(pack_message, "cb"):
        # The ctypes callback is created once and cached on the function
        # object so the same C callable survives across calls.
        logger.debug("pack_message: Creating callback")
        pack_message.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, POINTER(c_uint8), c_uint32), transform_cb)
    c_wallet_handle = c_int32(wallet_handle)
    msg_bytes = message.encode("utf-8")
    c_msg_len = c_uint32(len(msg_bytes))
    c_recipient_verkeys = c_char_p(json.dumps(recipient_verkeys).encode('utf-8'))
    # A NULL sender verkey selects anoncrypt mode in the native call.
    c_sender_vk = c_char_p(sender_verkey.encode('utf-8')) if sender_verkey is not None else None
    res = await do_call('indy_pack_message', c_wallet_handle, msg_bytes, c_msg_len, c_recipient_verkeys, c_sender_vk, pack_message.cb)
    logger.debug("pack_message: <<< res: %r", res)
    return res
def tasks_all_replaced_predicate(service_name, old_task_ids, task_predicate=None):
    """Returns whether ALL of old_task_ids have been replaced with new tasks.

    :param service_name: the service name
    :type service_name: str
    :param old_task_ids: list of original task ids as returned by
        get_service_task_ids
    :type old_task_ids: [str]
    :param task_predicate: filter to use when searching for tasks
    :type task_predicate: func
    :return: True if none of old_task_ids are still present in the service
    :rtype: bool
    """
    try:
        task_ids = get_service_task_ids(service_name, task_predicate)
    except DCOSHTTPException:
        print('failed to get task ids for service {}'.format(service_name))
        task_ids = []
    print('waiting for all task ids in "{}" to change:\n- old tasks: {}\n- current tasks: {}'.format(service_name, old_task_ids, task_ids))
    # Any surviving old task means replacement is incomplete.
    if any(task_id in old_task_ids for task_id in task_ids):
        return False
    # New tasks haven't fully replaced the old tasks yet.
    return len(task_ids) >= len(old_task_ids)
def run(self) -> None:
    """Execute function calls on a separate thread.

    Drains ``(future, function)`` pairs from ``self._tx`` until
    ``self._running`` is cleared, running each function and publishing its
    result (or exception) back onto the owning event loop.
    """
    while self._running:
        try:
            # Short timeout so the loop re-checks _running and can shut down.
            future, function = self._tx.get(timeout=0.1)
        except Empty:
            continue
        try:
            LOG.debug("executing %s", function)
            result = function()
            LOG.debug("returning %s", result)
            # Futures must be completed from the loop's own thread.
            self._loop.call_soon_threadsafe(future.set_result, result)
        except BaseException as e:
            # BaseException on purpose: even KeyboardInterrupt/SystemExit
            # raised by function() are forwarded to the waiting future.
            LOG.exception("returning exception %s", e)
            self._loop.call_soon_threadsafe(future.set_exception, e)
def from_stream(cls, stream):
    """Return a |Tiff| instance containing the properties of the TIFF image
    in *stream*."""
    # Parse once, then hand the extracted properties to the constructor.
    parsed = _TiffParser.parse(stream)
    return cls(parsed.px_width, parsed.px_height,
               parsed.horz_dpi, parsed.vert_dpi)
def dirsWavFeatureExtraction(dirNames, mt_win, mt_step, st_win, st_step, compute_beat=False):
    '''Same as dirWavFeatureExtraction, but instead of a single dir it
    takes a list of paths as input and returns a list of feature matrices.

    EXAMPLE:
    [features, classNames] =
        a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise', 'audioData/classSegmentsRec/speech',
                                    'audioData/classSegmentsRec/brush-teeth', 'audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02);

    It can be used during the training process of a classification model,
    in order to get feature matrices from various audio classes (each stored
    in a separate path).
    '''
    features = []
    classNames = []
    fileNames = []
    for d in dirNames:
        # Extract features for every wav file in this class folder.
        [f, fn, feature_names] = dirWavFeatureExtraction(d, mt_win, mt_step, st_win, st_step, compute_beat=compute_beat)
        if f.shape[0] == 0:
            # No audio files were found in the provided folder.
            continue
        features.append(f)
        fileNames.append(fn)
        # Class name is the last path component (ignoring a trailing separator).
        trimmed = d[:-1] if d[-1] == os.sep else d
        classNames.append(trimmed.split(os.sep)[-1])
    return features, classNames, fileNames
def fai_from_bam(ref_file, bam_file, out_file, data):
    """Create a fai index with only contigs in the input BAM file.

    :param ref_file: reference FASTA whose full fai index is filtered
    :param bam_file: BAM whose contigs define the subset to keep
    :param out_file: path of the filtered fai index to write
    :param data: bcbio sample data dict (provides config / transactions)
    :return: ``out_file``
    """
    if not utils.file_uptodate(out_file, bam_file):
        # Only gather the BAM contig set when we actually need to rebuild:
        # idxstats does real work on the BAM, so it was wasteful to run it
        # unconditionally before the up-to-date check.
        contigs = {x.contig for x in idxstats(bam_file, data)}
        with open(ref.fasta_idx(ref_file, data["config"])) as in_handle:
            with file_transaction(data, out_file) as tx_out_file:
                with open(tx_out_file, "w") as out_handle:
                    # Copy only non-blank fai lines whose contig is in the BAM.
                    for line in (l for l in in_handle if l.strip()):
                        if line.split()[0] in contigs:
                            out_handle.write(line)
    return out_file
def install_programmer(programmer_id, programmer_options, replace_existing=False):
    """install programmer in programmers.txt.

    :param programmer_id: string identifier
    :param programmer_options: dict like
    :param replace_existing: bool
    :rtype: None
    """
    if programmer_id in programmers().keys():
        log.debug('programmer already exists: %s', programmer_id)
        if not replace_existing:
            # Leave the existing entry untouched.
            return
        log.debug('remove programmer: %s', programmer_id)
        remove_programmer(programmer_id)
    # Append the (possibly re-created) programmer definition.
    lines = bunch2properties(programmer_id, programmer_options)
    programmers_txt().write_lines([''] + lines, append=1)
def python_2_nonzero_compatible(klass):
    """Adds a `__nonzero__()` method to classes that define a `__bool__()`
    method, so boolean conversion works in Python 2. Has no effect in
    Python 3.

    :param klass: The class to modify. Must define `__bool__()`.
    :return: The possibly patched class.
    """
    if not six.PY2:
        return klass
    if '__bool__' not in klass.__dict__:
        raise ValueError('@python_2_nonzero_compatible cannot be applied to {0} because '
                         'it doesn\'t define __bool__().'.format(klass.__name__))
    # Python 2 calls __nonzero__ where Python 3 calls __bool__.
    klass.__nonzero__ = klass.__bool__
    return klass
def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      APFSDirectory: directory or None if not available.
    """
    # Only entries with sub file entries represent a directory.
    if self._fsapfs_file_entry.number_of_sub_file_entries > 0:
        return APFSDirectory(self._file_system, self.path_spec)
    return None
def STORE_SLICE_3(self, instr):
    'obj [ lower : upper ] = expr'
    # AST stack, top first: upper bound, lower bound, the sliced object,
    # then the expression being assigned.
    upper = self.ast_stack.pop()
    lower = self.ast_stack.pop()
    value = self.ast_stack.pop()
    expr = self.ast_stack.pop()
    kw = dict(lineno=instr.lineno, col_offset=0)
    # Rebuild obj[lower:upper] as a Store-context subscript node.
    slice = _ast.Slice(lower=lower, step=None, upper=upper, **kw)
    subscr = _ast.Subscript(value=value, slice=slice, ctx=_ast.Store(), **kw)
    if isinstance(expr, _ast.AugAssign):
        # Augmented assignment already carries its own target; it must match
        # the subscript we just reconstructed.
        assign = expr
        result = cmp_ast(expr.target, subscr)
        assert result
    else:
        assign = _ast.Assign(targets=[subscr], value=expr, **kw)
    self.ast_stack.append(assign)
def watch_variable(self, tid, address, size, action=None):
    """Sets a hardware breakpoint at the given thread, address and size.

    @see: L{dont_watch_variable}

    @type  tid: int
    @param tid: Thread global ID.
    @type  address: int
    @param address: Memory address of variable to watch.
    @type  size: int
    @param size: Size of variable to watch. The only supported sizes are:
        byte (1), word (2), dword (4) and qword (8).
    @type  action: function
    @param action: (Optional) Action callback function.
        See L{define_hardware_breakpoint} for more details.
    """
    watchpoint = self.__set_variable_watch(tid, address, size, action)
    # Nothing more to do if the breakpoint is already active.
    if watchpoint.is_enabled():
        return
    self.enable_hardware_breakpoint(tid, address)
def run_worker(worker_class, *args, **kwargs):
    '''Bridge function to run a worker under :mod:`multiprocessing`.

    The :mod:`multiprocessing` module cannot
    :meth:`~multiprocessing.Pool.apply_async` to a class constructor,
    even if the ``__init__`` calls ``.run()``, so this simple wrapper
    calls ``worker_class(*args, **kwargs)`` and logs any exceptions
    before re-raising them.

    This is usually only used to create a :class:`HeadlessWorker`, but
    it does run through the complete
    :meth:`~rejester.Worker.register`, :meth:`~rejester.Worker.run`,
    :meth:`~rejester.Worker.unregister` sequence with some logging
    on worker-level failures.
    '''
    try:
        worker = worker_class(*args, **kwargs)
    except Exception:
        logger.critical('failed to create worker {0!r}'.format(worker_class), exc_info=True)
        raise
    # A note on style here:
    # If this runs ForkWorker, ForkWorker will os.fork() LoopWorker
    # (or SingleWorker) children, and the child will have this in its
    # call stack.  Eventually the child will sys.exit(), which raises
    # SystemExit, which is an exception that will trickle back through
    # here.  If there is a try:...finally: block, the finally: block
    # will execute on every child exit.  except Exception: won't run
    # on SystemExit.
    try:
        worker.register()
        worker.run()
        worker.unregister()
    except Exception:
        logger.error('worker {0!r} died'.format(worker_class), exc_info=True)
        try:
            # Best-effort cleanup: the bare except is deliberate so that
            # nothing raised by unregister() (including SystemExit) masks
            # the original failure re-raised below.
            worker.unregister()
        except:
            pass
        raise
def find_declaration(declarations, decl_type=None, name=None, parent=None, recursive=True, fullname=None):
    """Returns the single declaration that matches criteria defined by the
    developer.

    If more than one declaration is found, None is returned.

    For more information about the arguments see the
    :class:`match_declaration_t` class.

    :rtype: matched declaration :class:`declaration_t` or None
    """
    matches = find_all_declarations(declarations, decl_type=decl_type,
                                    name=name, parent=parent,
                                    recursive=recursive, fullname=fullname)
    # Ambiguous (or empty) results yield None; only a unique match is returned.
    if len(matches) != 1:
        return None
    return matches[0]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.