signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def coverage_score(gold, pred, ignore_in_gold=None, ignore_in_pred=None):
    """Calculate (global) coverage.

    Args:
        gold: A 1d array-like of gold labels.
        pred: A 1d array-like of predicted labels (assuming abstain == 0).
        ignore_in_gold: A list of labels for which elements having that gold
            label will be ignored.
        ignore_in_pred: A list of labels for which elements having that pred
            label will be ignored.

    Returns:
        A float, the (global) coverage score.
    """
    # BUG FIX: mutable default arguments ([]) are shared across calls; use
    # None sentinels instead. Behavior for all callers is unchanged.
    if ignore_in_gold is None:
        ignore_in_gold = []
    if ignore_in_pred is None:
        ignore_in_pred = []
    gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
    # Coverage is the fraction of non-abstain (nonzero) predictions.
    return np.sum(pred != 0) / len(pred)
def search(self, **kwargs):
    """Search environments using the extended-search API.

    :param search: Dict containing QuerySets to find environments.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing environments.
    """
    url = self.prepare_url('api/v3/environment/', kwargs)
    return super(ApiEnvironment, self).get(url)
def _ExtractSymbols(self, descriptors):
    """Pull out all the symbols from descriptor protos.

    Args:
        descriptors: The messages to extract descriptors from.

    Yields:
        A two element tuple of the type name and descriptor object.
    """
    for desc in descriptors:
        yield _PrefixWithDot(desc.full_name), desc
        # Recurse into nested message types.
        yield from self._ExtractSymbols(desc.nested_types)
        for enum in desc.enum_types:
            yield _PrefixWithDot(enum.full_name), enum
def iq_query(self, message: str):
    """Send a data query to the IQFeed API and return the parsed response.

    Returns a list of comma-separated fields, or None when the feed
    reports no data for the requested symbol or dates.
    """
    end_msg = '!ENDMSG!'
    recv_buffer = 4096

    # Send the historical data request message and buffer the data.
    self._send_cmd(message)
    data = ""
    while True:
        chunk = self._sock.recv(recv_buffer).decode('latin-1')
        data += chunk
        if chunk.startswith('E,'):  # error condition
            if chunk.startswith('E,!NO_DATA!'):
                # BUG FIX: Logger.warn() is deprecated; use warning().
                log.warning('No data available for the given symbol or dates')
                return
            raise Exception(chunk)
        elif end_msg in chunk:
            break

    # Clean up the data: drop the end-of-message marker and normalize
    # the line separators into a flat comma-separated list.
    data = data[:-1 * (len(end_msg) + 3)]
    data = "".join(data.split("\r"))
    data = data.replace(",\n", ",")[:-1]
    data = data.split(",")
    return data
def delete(
    rule: str,
    cls_method_name_or_view_fn: Optional[Union[str, Callable]] = None,
    *,
    defaults: Optional[Defaults] = _missing,
    endpoint: Optional[str] = _missing,
    is_member: Optional[bool] = _missing,
    only_if: Optional[Union[bool, Callable[[FlaskUnchained], bool]]] = _missing,
    **rule_options,
) -> RouteGenerator:
    """Like :func:`rule`, except specifically for HTTP DELETE requests.

    :param rule: The url rule for this route.
    :param cls_method_name_or_view_fn: The view function for this route.
    :param is_member: Whether or not this route is a member function.
    :param only_if: An optional function to decide at runtime whether or not to
                    register the route with Flask. It gets passed the configured
                    app as a single argument, and should return a boolean.
    :param rule_options: Keyword arguments that ultimately end up getting passed
                         on to :class:`~werkzeug.routing.Rule`.
    """
    # The method is forced to DELETE; discard any caller-supplied methods.
    rule_options.pop('methods', None)
    yield Route(
        rule,
        cls_method_name_or_view_fn,
        defaults=defaults,
        endpoint=endpoint,
        is_member=is_member,
        methods=['DELETE'],
        only_if=only_if,
        **rule_options,
    )
def visit_BinOp(self, node: ast.BinOp) -> Any:
    """Recursively visit the left and right operand, respectively, and apply
    the operation on the results."""
    left = self.visit(node=node.left)
    right = self.visit(node=node.right)

    # Dispatch table mapping AST operator node types to their evaluation.
    operations = [
        (ast.Add, lambda a, b: a + b),
        (ast.Sub, lambda a, b: a - b),
        (ast.Mult, lambda a, b: a * b),
        (ast.Div, lambda a, b: a / b),
        (ast.FloorDiv, lambda a, b: a // b),
        (ast.Mod, lambda a, b: a % b),
        (ast.Pow, lambda a, b: a ** b),
        (ast.LShift, lambda a, b: a << b),
        (ast.RShift, lambda a, b: a >> b),
        (ast.BitOr, lambda a, b: a | b),
        (ast.BitXor, lambda a, b: a ^ b),
        (ast.BitAnd, lambda a, b: a & b),
        (ast.MatMult, lambda a, b: a @ b),
    ]
    for op_type, apply_op in operations:
        if isinstance(node.op, op_type):
            result = apply_op(left, right)
            break
    else:
        raise NotImplementedError("Unhandled op of {}: {}".format(node, node.op))

    self.recomputed_values[node] = result
    return result
def open(filename, frame='unspecified'):
    """Creates a PointCloudImage from a file.

    Parameters
    ----------
    filename : :obj:`str`
        The file to load the data from. Must be one of .png, .jpg,
        .npy, or .npz.
    frame : :obj:`str`
        A string representing the frame of reference in which the new
        image lies.

    Returns
    -------
    :obj:`PointCloudImage`
        The new PointCloudImage.
    """
    return PointCloudImage(Image.load_data(filename), frame)
def update_history(self, directory):
    """Update browse history."""
    try:
        target = osp.abspath(to_text_string(directory))
        if target in self.history:
            self.histindex = self.history.index(target)
    except Exception:
        # On any failure, fall back to browsing the user's home directory.
        self.chdir(directory=get_home_dir(), browsing_history=True)
def check_successful_tx(web3: Web3, txid: str, timeout=180) -> Tuple[dict, dict]:
    """See if a transaction went through (Solidity code did not throw).

    :return: Transaction receipt and transaction info.
    """
    receipt = wait_for_transaction_receipt(web3=web3, txid=txid, timeout=timeout)
    txinfo = web3.eth.getTransaction(txid)

    if 'status' not in receipt:
        raise KeyError(
            'A transaction receipt does not contain the "status" field. '
            'Does your chain have Byzantium rules enabled?',
        )
    if receipt['status'] == 0:
        raise ValueError('Status 0 indicates failure')
    # A transaction that used up its entire gas allowance most likely failed.
    if txinfo['gas'] == receipt['gasUsed']:
        raise ValueError(f'Gas is completely used ({txinfo["gas"]}). Failure?')
    return receipt, txinfo
def get_new_actions(self):
    """Wrapper around do_get_new_actions, timed for stats purposes.

    :return: None
    TODO: Use a decorator for timing this function.
    """
    try:
        start = time.time()
        self.do_get_new_actions()
        statsmgr.timer('actions.got.time', time.time() - start)
    except RuntimeError:
        logger.error("Exception like issue #1007")
def get_queryset(self):
    """Restrict list views to active users.

    We want to still be able to modify archived (inactive) users, but
    they shouldn't show up on list views.

    DOC FIX: the original docstring described an ``archived`` query param;
    the code actually reads ``active``: 'true' (default) shows only active
    users, 'false' shows only inactive ones, and any other value (e.g.
    'both') returns everyone.
    """
    if self.action == 'list':
        active = get_true_false_both(self.request.query_params, 'active', 'true')
        if active == 'true':
            return self.queryset.filter(is_active=True)
        if active == 'false':
            return self.queryset.filter(is_active=False)
    return self.queryset
def PrintIndented(self, file, ident, code):
    """Print each entry of *code* to *file*, prefixed with the *ident* string.

    BUG FIX: rewritten from the Python 2 ``print >> file`` statement syntax
    (a SyntaxError on Python 3) to the print() function with ``file=``.
    """
    for entry in code:
        print('%s%s' % (ident, entry), file=file)
def execute_catch(c, sql, vars=None):
    """Run a query, but ignore any errors.

    For error recovery paths where the error handler should not raise
    another exception.
    """
    try:
        c.execute(sql, vars)
    except Exception as err:
        # Log only the leading SQL keyword to keep the message short.
        cmd = sql.split(' ', 1)[0]
        log.error("Error executing %s: %s", cmd, err)
def request_uniq(func):
    """Decorator that passes a per-uwsgi-request unique dict as the first
    argument to *func*.

    Note: won't work on non-uwsgi cases.
    """
    def wrapper(*args, **kwargs):
        return func(_get_request_unique_cache(), *args, **kwargs)
    return wrapper
def Recurrent(step_model):
    """Apply a stepwise model over a padded sequence, maintaining state.

    For RNNs: sequences are squared into a (time, batch, width) array and
    the step model is applied once per timestep, threading its state.
    """
    ops = step_model.ops

    def recurrent_fwd(seqs, drop=0.0):
        lengths = [len(seq) for seq in seqs]
        X, size_at_t, unpad = ops.square_sequences(seqs)
        Y = ops.allocate((X.shape[0], X.shape[1], step_model.nO))
        cell_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        hidden_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        out_drop = ops.get_dropout_mask((len(seqs), step_model.nO), 0.0)
        backprops = [None] * max(lengths)
        state = step_model.weights.get_initial_state(len(seqs))
        for t in range(max(lengths)):
            state = list(state)
            size = size_at_t[t]
            Xt = X[t, :size]
            # Truncate the state to the sequences still active at step t.
            state[0] = state[0][:size]
            state[1] = state[1][:size]
            if cell_drop is not None:
                state[0] *= cell_drop
            if hidden_drop is not None:
                state[1] *= hidden_drop
            inputs = (state, Xt)
            (state, Y[t, :size]), backprops[t] = step_model.begin_update(inputs)
            if out_drop is not None:
                Y[t, :size] *= out_drop
        outputs = unpad(Y)

        def recurrent_bwd(d_outputs, sgd=None):
            dY, size_at_t, unpad = step_model.ops.square_sequences(d_outputs)
            d_state = [
                step_model.ops.allocate((dY.shape[1], step_model.nO)),
                step_model.ops.allocate((dY.shape[1], step_model.nO)),
            ]
            updates = {}

            def gather_updates(weights, gradient, key=None):
                # Defer parameter updates until all timesteps are processed.
                updates[key] = (weights, gradient)

            dX = step_model.ops.allocate(
                (dY.shape[0], dY.shape[1], step_model.weights.nI)
            )
            # Walk the timesteps in reverse, accumulating state gradients.
            for t in range(max(lengths) - 1, -1, -1):
                if out_drop is not None:
                    dY[t] *= out_drop
                d_state_t, dXt = backprops[t]((d_state, dY[t]), sgd=gather_updates)
                d_state[0][:d_state_t[0].shape[0]] = d_state_t[0]
                d_state[1][:d_state_t[1].shape[0]] = d_state_t[1]
                dX[t, :dXt.shape[0]] = dXt
                if cell_drop is not None:
                    d_state[0] *= cell_drop
                if hidden_drop is not None:
                    d_state[1] *= hidden_drop
            d_cell, d_hidden = d_state
            step_model.weights.d_initial_cells += d_cell.sum(axis=0)
            step_model.weights.d_initial_hiddens += d_hidden.sum(axis=0)
            if sgd is not None:
                for key, (weights, gradient) in updates.items():
                    sgd(weights, gradient, key=key)
            return unpad(dX)

        return outputs, recurrent_bwd

    model = wrap(recurrent_fwd, step_model)
    model.nO = step_model.nO
    return model
def render_twitter(text, **kwargs):
    """Strict template block for rendering twitter embeds."""
    author = render_author(**kwargs['author'])
    metadata = render_metadata(**kwargs['metadata'])
    image = render_image(**kwargs['image'])
    # NOTE(review): the template's internal indentation is assumed to match
    # the original source — confirm against rendered output.
    template = """
    <div class="attachment attachment-twitter">
    {author}
    <p class="twitter-content">{text}</p>
    {metadata}
    {image}
    </div>
    """
    return template.format(
        author=author, text=text, metadata=metadata, image=image
    ).strip()
def conv2d_trans(ni: int, nf: int, ks: int = 2, stride: int = 2, padding: int = 0, bias=False) -> nn.ConvTranspose2d:
    "Create `nn.ConvTranspose2d` layer."
    layer = nn.ConvTranspose2d(
        ni, nf, kernel_size=ks, stride=stride, padding=padding, bias=bias
    )
    return layer
def clean_ufo(path):
    """Make sure old UFO data is removed, as it may contain deleted glyphs."""
    if not path.endswith(".ufo"):
        return
    if os.path.exists(path):
        shutil.rmtree(path)
def plot_joint_sfs(s, ax=None, imshow_kwargs=None):
    """Plot a joint site frequency spectrum.

    Parameters
    ----------
    s : array_like, int, shape (n_chromosomes_pop1, n_chromosomes_pop2)
        Joint site frequency spectrum.
    ax : axes, optional
        Axes on which to draw. If not provided, a new figure will be created.
    imshow_kwargs : dict-like
        Additional keyword arguments, passed through to ax.imshow().

    Returns
    -------
    ax : axes
        The axes on which the plot was drawn.
    """
    import matplotlib.pyplot as plt
    from matplotlib.colors import LogNorm

    # Check inputs.
    s = asarray_ndim(s, 2)

    # Set up a square figure when no axes were supplied.
    if ax is None:
        w = plt.rcParams['figure.figsize'][0]
        fig, ax = plt.subplots(figsize=(w, w))

    # Plotting defaults (log-scaled colors, no interpolation).
    if imshow_kwargs is None:
        imshow_kwargs = dict()
    imshow_kwargs.setdefault('cmap', 'jet')
    imshow_kwargs.setdefault('interpolation', 'none')
    imshow_kwargs.setdefault('aspect', 'auto')
    imshow_kwargs.setdefault('norm', LogNorm())

    # Plot data (transposed so pop1 is on the x axis).
    ax.imshow(s.T, **imshow_kwargs)

    # Tidy.
    ax.invert_yaxis()
    ax.set_xlabel('derived allele count (population 1)')
    ax.set_ylabel('derived allele count (population 2)')
    return ax
def predict(self, input):
    """Submit an input batch to the model.

    Returns an MpsFloatArray representing the model predictions. Calling
    asnumpy() on this value will wait for the batch to finish and yield
    the predictions as a numpy array.
    """
    assert self._mode == MpsGraphMode.Inference
    assert input.shape == self._ishape

    input_array = MpsFloatArray(input)
    result_handle = _ctypes.c_void_p()
    status_code = self._LIB.TCMPSPredictGraph(
        self.handle, input_array.handle, _ctypes.byref(result_handle)
    )

    assert status_code == 0, "Error calling TCMPSPredictGraph"
    assert result_handle, "TCMPSPredictGraph unexpectedly returned NULL pointer"

    result = MpsFloatArray(result_handle)
    assert result.shape() == self._oshape
    return result
def last(self):
    """Returns the last object matched or None if there is no matching object.

    >>> iterator = Host.objects.iterator()
    >>> c = iterator.filter('kali')
    >>> if c.exists():
    >>>     print(c.last())
    Host(name=kali-foo)

    :return: element or None
    """
    if not len(self):
        return None
    self._params.update(limit=1)
    if 'filter' not in self._params:
        return list(self)[-1]
    # A filter may not return results.
    results = list(self)
    return results[-1] if results else None
def get_action_meanings(self):
    """Return a list of action meanings, ordered by action key."""
    return [meaning for _, meaning in sorted(self._action_meanings.items())]
def tracking_number(self, service: str = 'usps') -> str:
    """Generate random tracking number.

    Supported services: USPS, FedEx and UPS.

    :param str service: Post service.
    :return: Tracking number.
    :raises ValueError: If the service is not supported.
    """
    service = service.lower()
    masks = {
        'usps': ('#### #### #### #### ####', '@@ ### ### ### US'),
        'fedex': ('#### #### ####', '#### #### #### ###'),
        'ups': ('1Z@####@##########',),
    }
    if service not in masks:
        raise ValueError('Unsupported post service')
    mask = self.random.choice(masks[service])
    return self.random.custom_code(mask=mask)  # type: ignore
def set_mappings(self, mappings):
    """Applies VC mappings.

    :param mappings: mappings (dict)
    """
    for source, destination in mappings.items():
        if not isinstance(source, str) or not isinstance(destination, str):
            raise DynamipsError("Invalid Frame-Relay mappings")
        source_port, source_dlci = map(int, source.split(':'))
        destination_port, destination_dlci = map(int, destination.split(':'))
        # Skip mappings whose destination port is unknown.
        if not self.has_port(destination_port):
            continue
        # Skip mappings already active in either direction.
        if (source_port, source_dlci) in self._active_mappings or \
                (destination_port, destination_dlci) in self._active_mappings:
            continue
        log.info('Frame Relay switch "{name}" [{id}]: mapping VC between port {source_port} DLCI {source_dlci} and port {destination_port} DLCI {destination_dlci}'.format(name=self._name, id=self._id, source_port=source_port, source_dlci=source_dlci, destination_port=destination_port, destination_dlci=destination_dlci))
        yield from self.map_vc(source_port, source_dlci, destination_port, destination_dlci)
        yield from self.map_vc(destination_port, destination_dlci, source_port, source_dlci)
def sync_folder_to_container(self, folder_path, container, delete=False, include_hidden=False, ignore=None, ignore_timestamps=False, object_prefix="", verbose=False):
    """Ensure every file under `folder_path` has a matching object in `container`.

    If there is no remote object matching a local file, it is created. If a
    matching object exists, the etag is examined to determine if the object
    in the container matches the local file; if they differ, the container
    is updated with the local file if the local file is newer when
    `ignore_timestamps` is False (default). If `ignore_timestamps` is True,
    the object is overwritten with the local file contents whenever the
    etags differ. NOTE: the timestamp of a remote object is the time it was
    uploaded, not the original modification time of the file stored in that
    object. Unless `include_hidden` is True, files beginning with an
    initial period are ignored.

    If the `delete` option is True, any objects in the container that do
    not have corresponding files in the local folder are deleted.

    You can selectively ignore files by passing either a single pattern or
    a list of patterns; these will be applied to the individual folder and
    file names, and any names that match any of the `ignore` patterns will
    not be uploaded. The patterns should be standard *nix-style shell
    patterns; e.g., '*pyc' will ignore all files ending in 'pyc', such as
    'program.pyc' and 'abcpyc'.

    If `object_prefix` is set it will be appended to the object name when
    it is checked and uploaded to the container. For example,
    sync_folder_to_container("folderToSync/", myContainer,
    object_prefix="imgFolder") uploads the files to
    container/imgFolder/... instead of just container/...

    Set `verbose` to True to make it print what is going on. It will show
    which files are being uploaded and which ones are not and why.
    """
    cont = self.get_container(container)
    self._local_files = []
    # Load a list of all the remote objects so we don't have to keep
    # hitting the service.
    if verbose:
        log = logging.getLogger("pyrax")
        log.info("Loading remote object list (prefix=%s)", object_prefix)
    data = cont.get_objects(prefix=object_prefix, full_listing=True)
    self._remote_files = dict((d.name, d) for d in data)
    self._sync_summary = {
        "total": 0,
        "uploaded": 0,
        "ignored": 0,
        "older": 0,
        "duplicate": 0,
        "failed": 0,
        "failure_reasons": [],
        "deleted": 0,
    }
    self._sync_folder_to_container(
        folder_path,
        cont,
        prefix="",
        delete=delete,
        include_hidden=include_hidden,
        ignore=ignore,
        ignore_timestamps=ignore_timestamps,
        object_prefix=object_prefix,
        verbose=verbose,
    )
    # Unset the _remote_files cache.
    self._remote_files = None
    if verbose:
        # Log the summary.
        summary = self._sync_summary
        log.info("Folder sync completed at %s" % time.ctime())
        log.info(" Total files processed: %s" % summary["total"])
        log.info(" Number Uploaded: %s" % summary["uploaded"])
        log.info(" Number Ignored: %s" % summary["ignored"])
        log.info(" Number Skipped (older): %s" % summary["older"])
        log.info(" Number Skipped (dupe): %s" % summary["duplicate"])
        log.info(" Number Deleted: %s" % summary["deleted"])
        log.info(" Number Failed: %s" % summary["failed"])
        if summary["failed"]:
            for reason in summary["failure_reasons"]:
                log.info(" Reason: %s" % reason)
def geometrize_shapes(shapes: DataFrame, *, use_utm: bool = False) -> DataFrame:
    """Convert a GTFS shapes DataFrame to a GeoPandas GeoDataFrame.

    The result has a ``'geometry'`` column of WGS84 LineStrings instead of
    the columns ``'shape_pt_sequence'``, ``'shape_pt_lon'``,
    ``'shape_pt_lat'``, and ``'shape_dist_traveled'``.
    If ``use_utm``, then use local UTM coordinates for the geometries.

    Notes
    -----
    Requires GeoPandas.
    """
    import geopandas as gpd

    f = shapes.copy().sort_values(["shape_id", "shape_pt_sequence"])

    def to_linestring(group):
        # Build one LineString per shape from its ordered points.
        coords = group[["shape_pt_lon", "shape_pt_lat"]].values
        return pd.Series({"geometry": sg.LineString(coords)})

    g = f.groupby("shape_id").apply(to_linestring).reset_index()
    g = gpd.GeoDataFrame(g, crs=cs.WGS84)
    if use_utm:
        lat, lon = f.loc[0, ["shape_pt_lat", "shape_pt_lon"]].values
        g = g.to_crs(hp.get_utm_crs(lat, lon))
    return g
def option_changed(self, option, value):
    """Handle when the value of an option has changed."""
    setattr(self, to_text_string(option), value)
    self.shellwidget.set_namespace_view_settings()
    # NOTE: deliberately `is False`, not `not ...` — only emit when setup
    # is explicitly finished.
    if self.setup_in_progress is False:
        self.sig_option_changed.emit(option, value)
def parse_lookup_expression(element):
    """Parse lookups that are defined with their own element."""
    lookup_grammar = r"""
    lookup = _ "(" range? _ ( "(" _ number _ "," _ number _ ")" _ ","? _ )+ ")"
    number = ("+"/"-")? ~r"\d+\.?\d*(e[+-]\d+)?"
    _ = ~r"[\s\\]*"  # whitespace character
    range = _ "[" ~r"[^\]]*" "]" _ ","
    """
    parser = parsimonious.Grammar(lookup_grammar)
    tree = parser.parse(element['expr'])

    class LookupParser(parsimonious.NodeVisitor):
        def __init__(self, ast):
            self.translation = ""
            self.new_structure = []
            self.visit(ast)

        def visit__(self, n, vc):
            # Remove whitespace.
            return ''

        def visit_lookup(self, n, vc):
            # The longest child holds the flattened (x, y) pair text.
            pairs = max(vc, key=len)
            mixed_list = pairs.replace('(', '').replace(')', '').split(',')
            xs = mixed_list[::2]
            ys = mixed_list[1::2]
            self.translation = "functions.lookup(x, [%(xs)s], [%(ys)s])" % {
                'xs': ','.join(xs),
                'ys': ','.join(ys),
            }

        def generic_visit(self, n, vc):
            return ''.join(filter(None, vc)) or n.text

    parse_object = LookupParser(tree)
    return {'py_expr': parse_object.translation, 'arguments': 'x'}
def _can_access_request ( self ) :
"""Can access current request object if all are true
- The serializer is the root .
- A request context was passed in .
- The request method is GET .""" | if self . parent :
return False
if not hasattr ( self , "context" ) or not self . context . get ( "request" , None ) :
return False
return self . context [ "request" ] . method == "GET" |
def dft(blk, freqs, normalize=True):
    """Complex non-optimized Discrete Fourier Transform.

    Finds the DFT for values in a given frequency list, in order, over the
    data block seen as periodic.

    Parameters
    ----------
    blk :
        An iterable with well-defined length. Don't use this function with
        Stream objects!
    freqs :
        List of frequencies to find the DFT, in rad/sample. FFT
        implementations like numpy.fft.fft find the coefficients for N
        frequencies equally spaced as ``line(N, 0, 2 * pi, finish=False)``.
    normalize :
        If True (default), the coefficient sums are divided by ``len(blk)``,
        and the coefficient for the DC level (zero frequency) is the mean of
        the block. If False, that coefficient would be the sum of the data.

    Returns
    -------
    A list of DFT values for each frequency, in the same order that they
    appear in the freqs input.

    Note
    ----
    This isn't an FFT implementation; it performs O(M*N) floating point
    operations (M, N being the input lengths). It can find the DFT for any
    specific frequency, with no need for zero padding or a full linearly
    spaced frequency grid.
    """
    coeffs = (
        sum(sample * cexp(-1j * n * freq) for n, sample in enumerate(blk))
        for freq in freqs
    )
    if normalize:
        length = len(blk)
        return [c / length for c in coeffs]
    return list(coeffs)
def fix_e722(self, result):
    """Fix bare except."""
    line_index, _, target = get_index_offset_contents(result, self.source)
    if BARE_EXCEPT_REGEX.search(target):
        # Keep everything before the reported column, replace the rest.
        prefix = target[:result['column'] - 1]
        self.source[line_index] = '{0}{1}'.format(prefix, "except Exception:")
def fit(self, X, chunks):
    """Learn the RCA model.

    Parameters
    ----------
    X : (n x d) data matrix
        Each row corresponds to a single instance.
    chunks : (n,) array of ints
        When ``chunks[i] == -1``, point i doesn't belong to any chunklet.
        When ``chunks[i] == j``, point i belongs to chunklet j.
    """
    X = self._prepare_inputs(X, ensure_min_samples=2)

    # PCA projection to remove noise and redundant information.
    if self.pca_comps is not None:
        pca = decomposition.PCA(n_components=self.pca_comps)
        X_t = pca.fit_transform(X)
        M_pca = pca.components_
    else:
        X_t = X - X.mean(axis=0)
        M_pca = None

    chunks = np.asanyarray(chunks, dtype=int)
    chunk_mask, chunked_data = _chunk_mean_centering(X_t, chunks)

    inner_cov = np.atleast_2d(np.cov(chunked_data, rowvar=0, bias=1))
    dim = self._check_dimension(np.linalg.matrix_rank(inner_cov), X_t)

    # Fisher Linear Discriminant projection.
    if dim < X_t.shape[1]:
        total_cov = np.cov(X_t[chunk_mask], rowvar=0)
        tmp = np.linalg.lstsq(total_cov, inner_cov)[0]
        vals, vecs = np.linalg.eig(tmp)
        inds = np.argsort(vals)[:dim]
        A = vecs[:, inds]
        inner_cov = np.atleast_2d(A.T.dot(inner_cov).dot(A))
        self.transformer_ = _inv_sqrtm(inner_cov).dot(A.T)
    else:
        self.transformer_ = _inv_sqrtm(inner_cov).T

    # Compose with the PCA projection when one was applied.
    if M_pca is not None:
        self.transformer_ = np.atleast_2d(self.transformer_.dot(M_pca))
    return self
def zoom(image, factor, dimension, hdr=False, order=3):
    """Zoom the provided image by the supplied factor in the supplied dimension.

    The factor is an integer determining how many slices should be put
    between each existing pair. If an image header (hdr) is supplied, its
    voxel spacing gets updated.

    Returns the image and the updated header (or False).

    Raises:
        ValueError: If the supplied dimension exceeds the image dimensionality.
    """
    # Check if the supplied dimension is valid.
    if dimension >= image.ndim:
        # BUG FIX: argparse.ArgumentError requires an argument object as its
        # first parameter, so the original one-argument call raised a
        # TypeError instead of the intended error. ValueError is correct here.
        raise ValueError(
            'The supplied zoom-dimension {} exceeds the image dimensionality '
            'of 0 to {}.'.format(dimension, image.ndim - 1)
        )

    logger = Logger.getInstance()
    logger.debug('Old shape = {}.'.format(image.shape))

    # Per-dimension zoom factors: only the target dimension is scaled so
    # that `factor` new slices fit between each existing pair.
    zoom_factors = [1] * image.ndim
    zoom_factors[dimension] = (
        image.shape[dimension] + (image.shape[dimension] - 1) * factor
    ) / float(image.shape[dimension])
    logger.debug('Reshaping with = {}.'.format(zoom_factors))
    image = interpolation.zoom(image, zoom_factors, order=order)
    logger.debug('New shape = {}.'.format(image.shape))

    if hdr:
        # Shrink the voxel spacing in the zoomed dimension to match.
        new_spacing = list(header.get_pixel_spacing(hdr))
        new_spacing[dimension] = new_spacing[dimension] / float(factor + 1)
        logger.debug('Setting pixel spacing from {} to {}....'.format(header.get_pixel_spacing(hdr), new_spacing))
        header.set_pixel_spacing(hdr, tuple(new_spacing))
    return image, hdr
def subvol_create(self, path):
    """Create a btrfs subvolume in the specified path.

    :param path: path to create
    """
    args = {'path': path}
    # Validate the arguments before dispatching the call.
    self._subvol_chk.check(args)
    self._client.sync('btrfs.subvol_create', args)
def _get_auth_from_console(self, realm):
    """Prompt for the user and password."""
    # Start from any previously memoized credentials for this realm.
    self.user, self.password = self.AUTH_MEMOIZE_INPUT.get(realm, (self.user, None))
    if not self.auth_valid():
        if not self.user:
            login = getpass.getuser()
            prompt = 'Username for "{}" [{}]: '.format(realm, login)
            self.user = self._raw_input(prompt) or login
        self.password = getpass.getpass('Password for "{}": '.format(realm))
        # Memoize for later prompts in the same realm.
        Credentials.AUTH_MEMOIZE_INPUT[realm] = self.user, self.password
    return 'console'
def requote_uri(uri):
    """Requote uri if it contains non-ascii chars, spaces etc."""
    # Imported lazily to reduce tabulator import time.
    import requests.utils
    if six.PY2:
        # Python 2: percent-encode high bytes and IDNA-encode the host first.
        def url_encode_non_ascii(bytes):
            pattern = '[\x80-\xFF]'
            replace = lambda c: ('%%%02x' % ord(c.group(0))).upper()
            return re.sub(pattern, replace, bytes)
        parts = urlparse(uri)
        uri = urlunparse(
            part.encode('idna') if index == 1 else url_encode_non_ascii(part.encode('utf-8'))
            for index, part in enumerate(parts)
        )
    return requests.utils.requote_uri(uri)
def _populate_lp(self, dataset, **kwargs):
    """Populate columns necessary for an LP dataset.

    This should not be called directly, but rather via
    :meth:`Body.populate_observable` or :meth:`System.populate_observables`.
    """
    logger.debug("{}._populate_lp(dataset={})".format(self.component, dataset))
    profile_rest = kwargs.get('profile_rest', self.lp_profile_rest.get(dataset))
    # LP columns are currently just the RV columns.
    rv_cols = self._populate_rv(dataset, **kwargs)
    cols = rv_cols
    # rvs = (rv_cols['rvs'] * u.solRad / u.d).to(u.m / u.s).value
    # cols['dls'] = rv_cols['rvs'] * profile_rest / c.c.si.value
    return cols
def scene_remove(frames):
    """Parse a scene.rm message: "scene.rm" <scene_id>."""
    reader = MessageReader(frames)
    parsed = reader.string("command").uint32("scene_id").assert_end().get()
    if parsed.command != "scene.rm":
        raise MessageParserError("Command is not 'scene.rm'")
    return (parsed.scene_id,)
def _send_command_raw(self, command, opt=''):
    """Open a fresh connection, send a command and return the TV's answer.

    The TV doesn't handle long running connections very well, so we open a
    new connection every time. There might be a better way to do this, but
    it's pretty quick and resilient.

    Returns:
        If a value is being requested (opt2 is "?"), then the return value
        is returned. If a value is being set, it returns True for "OK" or
        False for "ERR".
    """
    # According to the documentation:
    # http://files.sharpusa.com/Downloads/ForHome/
    # HomeEntertainment/LCDTVs/Manuals/tel_man_LC40_46_52_60LE830U.pdf
    # Page 58 - Communication conditions for IP
    # The connection could be lost (but not only after 3 minutes),
    # so we need to retry the remote commands to be sure about states.
    end_time = time.time() + self.timeout
    while time.time() < end_time:
        try:
            # Connect.
            sock_con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock_con.settimeout(self.connection_timeout)
            sock_con.connect((self.ip_address, self.port))
            # Authenticate.
            sock_con.send(self.auth)
            sock_con.recv(1024)
            sock_con.recv(1024)
            # Send the command, padded to 8 chars and CR-terminated.
            if opt != '':
                command += str(opt)
            sock_con.send(str.encode(command.ljust(8) + '\r'))
            status = bytes.decode(sock_con.recv(1024)).strip()
        except (OSError, socket.error) as exp:
            time.sleep(0.1)
            # Only give up once the overall timeout has elapsed.
            if time.time() >= end_time:
                raise exp
        else:
            sock_con.close()
            # Sometimes the status is empty, so we need to retry.
            if status != u'':
                break
    if status == "OK":
        return True
    elif status == "ERR":
        return False
    else:
        try:
            return int(status)
        except ValueError:
            return status
def decode_hparams(overrides=""):
    """Hparams for decoding."""
    hparams = decoding.decode_hparams()
    # Number of interpolations between [0.0, 1.0].
    hparams.add_hparam("num_interp", 11)
    # Which level(s) to interpolate.
    hparams.add_hparam("level_interp", [0, 1, 2])
    # "all" or "ranked": interpolate all channels, or a subset ranked
    # according to squared L2 norm.
    hparams.add_hparam("channel_interp", "all")
    hparams.add_hparam("rank_interp", 1)
    # Whether or not to save frames as summaries.
    hparams.add_hparam("save_frames", True)
    hparams.parse(overrides)
    return hparams
def get(self, resource_id=None):
    """Return an HTTP response object resulting from an HTTP GET call.

    If *resource_id* is provided, return just the single resource;
    otherwise return the full collection.

    :param resource_id: The value of the resource's primary key
    :raises BadRequestException: if the model (or resource) fails validation
    """
    if request.path.endswith('meta'):
        return self._meta()
    if resource_id is None:
        error_message = is_valid_method(self.__model__)
        if error_message:
            raise BadRequestException(error_message)
        if 'export' in request.args:
            return self._export(self._all_resources())
        # Fixed inconsistency: this branch used flask.jsonify while the
        # single-resource branch used the imported jsonify; unified on the
        # imported name used elsewhere in this function.
        return jsonify({self.__json_collection_name__: self._all_resources()})
    resource = self._resource(resource_id)
    error_message = is_valid_method(self.__model__, resource)
    if error_message:
        raise BadRequestException(error_message)
    return jsonify(resource)
def _prompt(self, prompt=None):
    """Read one line typed by the user.

    :param prompt: An optional prompt message shown before reading
    :return: The line read, after conversion to str
    """
    if prompt:
        # Show the prompt and make sure it is displayed immediately.
        self.write(prompt)
        self.output.flush()
    line = self.input.readline()
    return to_str(line)
def filter_slaves(self, slaves):
    """Filter out unusable sentinel slaves.

    Drops slaves that are in an ODOWN or SDOWN state, and slaves whose
    'master-link-status' is not 'ok'.

    :param slaves: iterable of slave-info dicts (sentinel output)
    :return: list of (ip, port) tuples for the healthy slaves
    """
    # Fixed: the first parameter was misspelled "selfie"; renamed to the
    # conventional "self" (positional, so callers are unaffected).
    return [(s['ip'], s['port'])
            for s in slaves
            if not s['is_odown']
            and not s['is_sdown']
            and s['master-link-status'] == 'ok']
def ServicesGet(self, sensor_id):
    """Retrieve services connected to a sensor in CommonSense.

    If the call is successful, the result can be obtained by a call to
    getResponse() and should be a json string.

    @sensor_id (int) - Sensor id of sensor to retrieve services from.
    @return (bool) - Boolean indicating whether ServicesGet was successful.
    """
    url = '/sensors/{0}/services.json'.format(sensor_id)
    if not self.__SenseApiCall__(url, 'GET'):
        self.__error__ = "api call unsuccessful"
        return False
    return True
def get_text_style(text):
    """Return the style dict for a matplotlib text instance."""
    alpha = text.get_alpha()
    return {
        # A None alpha means fully opaque.
        'alpha': 1 if alpha is None else alpha,
        'fontsize': text.get_size(),
        'color': color_to_hex(text.get_color()),
        'halign': text.get_horizontalalignment(),  # left, center, right
        'valign': text.get_verticalalignment(),    # baseline, center, top
        'malign': text._multialignment,            # text alignment when '\n' in text
        'rotation': text.get_rotation(),
        'zorder': text.get_zorder(),
    }
def add_key(service, key):
    """Add a key to a ceph keyring, creating the keyring if needed.

    If the key is already present in the keyring, logs and returns
    without making changes.
    """
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        with open(keyring, 'r') as ring:
            existing = ring.read()
        if key in existing:
            log('Ceph keyring exists at %s and has not changed.' % keyring,
                level=DEBUG)
            return
        log('Updating existing keyring %s.' % keyring, level=DEBUG)
    check_call(['ceph-authtool', keyring, '--create-keyring',
                '--name=client.{}'.format(service),
                '--add-key={}'.format(key)])
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
def stats_timing(stats_key, stats_logger):
    """Provide a transactional timing scope around a series of operations.

    Yields the start timestamp; on exit (normal or exceptional) the elapsed
    time is reported to ``stats_logger`` under ``stats_key``.
    """
    start_ts = now_as_float()
    try:
        yield start_ts
    finally:
        # Removed the no-op ``except Exception as e: raise e`` — ``finally``
        # already guarantees the timing is recorded on error paths, and the
        # re-raise only rewrote the traceback origin.
        stats_logger.timing(stats_key, now_as_float() - start_ts)
def add_note(self, note):
    """Add a note to the usernotes wiki page.

    Arguments:
        note: the note to be added (Note)

    Returns the update message for the usernotes wiki

    Raises:
        ValueError when the warning type of the note can not be found in the
        stored list of warnings.
    """
    notes = self.cached_json
    if not note.moderator:
        # Default the moderator to the currently authenticated user.
        note.moderator = self.r.user.me().name
    # Get index of moderator in mod list from usernotes
    # Add moderator to list if not already there
    try:
        mod_index = notes['constants']['users'].index(note.moderator)
    except ValueError:
        notes['constants']['users'].append(note.moderator)
        mod_index = notes['constants']['users'].index(note.moderator)
    # Get index of warning type from warnings list
    # Add warning type to list if not already there
    try:
        warn_index = notes['constants']['warnings'].index(note.warning)
    except ValueError:
        if note.warning in Note.warnings:
            notes['constants']['warnings'].append(note.warning)
            warn_index = notes['constants']['warnings'].index(note.warning)
        else:
            raise ValueError('Warning type not valid: ' + note.warning)
    # Compact on-wiki note format: n=text, t=time, m=moderator index,
    # l=link, w=warning-type index (indices into the constants lists above).
    new_note = {'n': note.note, 't': note.time, 'm': mod_index,
                'l': note.link, 'w': warn_index}
    try:
        # Newest notes go first.
        notes['users'][note.username]['ns'].insert(0, new_note)
    except KeyError:
        # First note ever recorded for this user.
        notes['users'][note.username] = {'ns': [new_note]}
    return '"create new note on user {}" via puni'.format(note.username)
def setup_logging(debug, logfile=None):
    '''Configure the logging format and log level.

    :param debug: when True, log at DEBUG level, otherwise INFO
    :param logfile: optional file to write log records to instead of stderr
    '''
    kwargs = {
        'format': "%(asctime)s %(levelname)8s %(message)s",
        'datefmt': "%H:%M:%S %Y/%m/%d",
        'level': logging.DEBUG if debug else logging.INFO,
    }
    if logfile is not None:
        kwargs['filename'] = logfile
    logging.basicConfig(**kwargs)
def get_subset_riverid_index_list(self, river_id_list):
    """Look up the netCDF file indices for the given river IDs.

    Parameters
    ----------
    river_id_list : list or :obj:`numpy.array`
        River IDs of the river segments whose indices are wanted.

    Returns
    -------
    :obj:`numpy.array`
        Sorted array of the river indices found in the NetCDF file.
    :obj:`numpy.array`
        The matching river IDs, in the same (index-sorted) order.
    :obj:`numpy.array`
        The river IDs that were not found.
    """
    found_indices = []
    found_ids = []
    missing_ids = []
    for river_id in river_id_list:
        # Resolve where each stream id lives in the netcdf file.
        try:
            index = self.get_river_index(river_id)
        except IndexError:
            log("ReachID {0} not found in netCDF dataset."
                " Skipping ...".format(river_id), "WARNING")
            missing_ids.append(river_id)
        else:
            found_indices.append(index)
            found_ids.append(river_id)
    index_array = np.array(found_indices)
    id_array = np.array(found_ids)
    order = np.argsort(index_array)
    return (index_array[order], id_array[order], np.array(missing_ids))
def kill_cursor(self, cursor):
    """Kill (cut into the kill ring) the text selected by *cursor*."""
    selected = cursor.selectedText()
    if not selected:
        return
    cursor.removeSelectedText()
    self.kill(selected)
def return_single_real_id_base(dbpath, set_object, object_id):
    """Return the real_id string of the object with the given id.

    Parameters
    ----------
    dbpath : string, path to SQLite database file
    set_object : mapped class (either TestSet or TrainSet) stored in the database
    object_id : int, id of the object in the database

    Returns
    -------
    real_id : string
    """
    # NOTE(review): four slashes treats dbpath as an absolute path; the
    # canonical form is 'sqlite:///' + absolute path — confirm dbpath shape.
    engine = create_engine('sqlite:////' + dbpath)
    session_cl = sessionmaker(bind=engine)
    session = session_cl()
    try:
        tmp_object = session.query(set_object).get(object_id)
    finally:
        # Always release the connection, even if the query raises.
        session.close()
    return tmp_object.real_id
def _flush_graph_val(self):
    """Send all new and changed graph values to the database."""
    if not self._graphvals2set:
        # Nothing queued; avoid issuing empty SQL batches.
        return
    # For each (graph, key, branch), find the earliest (turn, tick) being
    # written, so everything at or after that point can be deleted before
    # the fresh values are inserted.
    delafter = {}
    for graph, key, branch, turn, tick, value in self._graphvals2set:
        if (graph, key, branch) in delafter:
            # Keep the minimum (turn, tick) seen for this key so far.
            delafter[graph, key, branch] = min((
                (turn, tick),
                delafter[graph, key, branch]
            ))
        else:
            delafter[graph, key, branch] = (turn, tick)
    # Delete stale rows after the earliest write point for each key...
    self.sqlmany(
        'del_graph_val_after',
        *((graph, key, branch, turn, turn, tick)
          for ((graph, key, branch), (turn, tick)) in delafter.items()))
    # ...then insert the queued values and clear the queue.
    self.sqlmany('graph_val_insert', *self._graphvals2set)
    self._graphvals2set = []
def can_proceed(self):
    """Check whether the app may proceed with an update.

    :return: True iff at least ``update_interval`` days have elapsed
        since ``last_update``.
    """
    interval = datetime.timedelta(days=self.update_interval)
    return datetime.datetime.now() >= self.last_update + interval
def list_shares(self, prefix=None, marker=None, num_results=None,
                include_metadata=False, timeout=None):
    '''Return a generator that lists the shares under the account.

    The generator lazily follows the continuation tokens returned by the
    service and stops when all shares have been returned or num_results is
    reached. If num_results is specified and the account has more shares,
    the finished generator exposes a populated next_marker field that can
    seed a new generator for further results.

    :param str prefix:
        Filters the results to return only shares whose names begin with
        the specified prefix.
    :param str marker:
        An opaque continuation token, e.g. the next_marker of a previous
        finished generator.
    :param int num_results:
        Specifies the maximum number of shares to return.
    :param bool include_metadata:
        Specifies that share metadata be returned in the response.
    :param int timeout:
        The timeout parameter is expressed in seconds.
    '''
    operation_context = _OperationContext(location_lock=True)
    kwargs = {
        'prefix': prefix,
        'marker': marker,
        'max_results': num_results,
        'include': 'metadata' if include_metadata else None,
        'timeout': timeout,
        '_context': operation_context,
    }
    first_page = self._list_shares(**kwargs)
    return ListGenerator(first_page, self._list_shares, (), kwargs)
def create_system(self, new_machine_id=False):
    """Register the machine via the API.

    Returns False when branch info is unavailable, otherwise the
    response of the POST request.
    """
    # Resolve identity first (generate_machine_id may create/refresh the id).
    client_hostname = determine_hostname()
    machine_id = generate_machine_id(new_machine_id)
    branch_info = self.branch_info
    if not branch_info:
        return False
    data = {
        'machine_id': machine_id,
        'remote_branch': branch_info['remote_branch'],
        'remote_leaf': branch_info['remote_leaf'],
        'hostname': client_hostname,
    }
    if self.config.display_name is not None:
        data['display_name'] = self.config.display_name
    payload = json.dumps(data)
    post_system_url = self.api_url + '/v1/systems'
    logger.debug("POST System: %s", post_system_url)
    logger.debug(payload)
    net_logger.info("POST %s", post_system_url)
    return self.session.post(post_system_url,
                             headers={'Content-Type': 'application/json'},
                             data=payload)
def put(self, page, payload, parms=None):
    '''Puts an XML object on the server - used to update Redmine items. Returns nothing useful.'''
    # NOTE: Python 2 code (print statement).
    if self.readonlytest:
        # Dry-run mode: report what would happen instead of hitting the server.
        print 'Redmine read only test: Pretending to update: ' + page
    else:
        return self.open(page, parms, payload, HTTPrequest=self.PUT_Request)
def load_presets(self, presets_path=None):
    """Load presets from disk.

    Read JSON formatted preset data from the specified path,
    or the default location at ``/var/lib/sos/presets``.

    :param presets_path: a directory containing JSON presets.
    """
    presets_path = presets_path or self.presets_path
    if not os.path.exists(presets_path):
        return
    for preset_file in os.listdir(presets_path):
        preset_path = os.path.join(presets_path, preset_file)
        try:
            # Use a context manager so the file handle is not leaked
            # (the original passed a bare open() into json.load()).
            with open(preset_path) as pf:
                preset_data = json.load(pf)
        except ValueError:
            # Not valid JSON; skip this file.
            continue
        for preset in preset_data.keys():
            pd = PresetDefaults(preset, opts=SoSOptions())
            data = preset_data[preset]
            pd.desc = data[DESC] if DESC in data else ""
            pd.note = data[NOTE] if NOTE in data else ""
            if OPTS in data:
                for arg in _arg_names:
                    if arg in data[OPTS]:
                        setattr(pd.opts, arg, data[OPTS][arg])
            # Anything loaded from disk is a user preset, not a builtin.
            pd.builtin = False
            self.presets[preset] = pd
def isbns(self, key, value):
    """Populate the ``isbns`` key."""
    def _normalize_medium(raw_medium):
        # Accept only media listed in the 'hep' schema enum, mapping a few
        # common synonyms onto their canonical values.
        schema = load_schema('hep')
        valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']
        cleaned = raw_medium.lower().replace('-', '').replace(' ', '')
        if cleaned in valid_media:
            return cleaned
        if cleaned == 'ebook':
            return 'online'
        if cleaned == 'paperback':
            return 'softcover'
        return ''

    raw_medium = force_single_element(value.get('b', ''))
    raw_isbn = force_single_element(value.get('a', '')).replace('.', '')
    isbn_value = normalize_isbn(raw_isbn) if raw_isbn else None
    return {
        'medium': _normalize_medium(raw_medium),
        'value': isbn_value,
    }
def new(type_dict, type_factory, *type_parameters):
    """Create a fully reified type from a type schema, memoized in type_dict."""
    cache_key = (type_factory,) + type_parameters
    if cache_key not in type_dict:
        # Build and memoize the reified type on first request.
        factory = TypeFactory.get_factory(type_factory)
        type_dict[cache_key] = factory.create(type_dict, *type_parameters)
    return type_dict[cache_key]
def check_param(param, param_name, dtype, constraint=None, iterable=True, max_depth=2):
    """checks the dtype of a parameter,
    and whether it satisfies a numerical constraint

    Parameters
    ----------
    param : object
    param_name : str, name of the parameter
    dtype : str, desired dtype of the parameter
    constraint : str, default: None
        numerical constraint of the parameter.
        if None, no constraint is enforced
    iterable : bool, default: True
        whether to allow iterable param
    max_depth : int, default: 2
        maximum nesting of the iterable.
        only used if iterable == True

    Returns
    -------
    list of validated and converted parameter(s)
    """
    # Build the error message used for both type and constraint failures.
    msg = []
    msg.append(param_name + " must be " + dtype)
    if iterable:
        msg.append(" or nested iterable of depth " + str(max_depth) + " containing " + dtype + "s")
    msg.append(", but found " + param_name + " = {}".format(repr(param)))
    if constraint is not None:
        msg = (" " + constraint).join(msg)
    else:
        msg = ''.join(msg)
    # check param is numerical
    try:
        param_dt = np.array(flatten(param))
        # + np.zeros_like(flatten(param), dtype='int')
        # param_dt = np.array(param).astype(dtype)
    except (ValueError, TypeError):
        raise TypeError(msg)
    # check iterable (and reject iterables when they are not allowed)
    if iterable:
        if check_iterable_depth(param) > max_depth:
            raise TypeError(msg)
    if (not iterable) and isiterable(param):
        raise TypeError(msg)
    # check param is correct dtype (values survive a float round-trip)
    if not (param_dt == np.array(flatten(param)).astype(float)).all():
        raise TypeError(msg)
    # check constraint
    if constraint is not None:
        # NOTE(review): evaluates the constraint string with eval -- only
        # safe if 'constraint' always comes from trusted callers; confirm
        # before exposing to user input.
        if not (eval('np.' + repr(param_dt) + constraint)).all():
            raise ValueError(msg)
    return param
def ensure_xpointer_compatibility(node_id):
    """Make a given node ID xpointer compatible.

    xpointer identifiers must not contain ':', so we'll replace it by '_'.

    Parameters
    ----------
    node_id : str or unicode or int
        a node or edge ID

    Returns
    -------
    xpointer_id : str or unicode or int
        int IDs are returned verbatim, str/unicode IDs are
        returned with ':' replaced by '_'
    """
    # Fixed: the assertion message had no '{}' placeholder, so .format()
    # silently dropped the offending type from the message.
    assert isinstance(node_id, (int, str, unicode)), \
        "node ID must be an int, str or unicode, not {}".format(type(node_id))
    if isinstance(node_id, (str, unicode)):
        return FORBIDDEN_XPOINTER_RE.sub('_', node_id)
    return node_id
def create_gce_image(zone, project, instance_name, name, description):
    """Shut down the instance and create an image from its disk.

    Assumes that the disk name is the same as the instance_name (this is
    the default behavior for boot disks on GCE).
    """
    disk_name = instance_name
    try:
        down_gce(instance_name=instance_name, project=project, zone=zone)
    except HttpError as e:
        # A 404 just means the instance is already stopped/gone.
        if e.resp.status != 404:
            raise e
        log_yellow("the instance {} is already down".format(instance_name))
    body = {
        "rawDisk": {},
        "name": name,
        "sourceDisk": "projects/{}/zones/{}/disks/{}".format(
            project, zone, disk_name),
        "description": description,
    }
    compute = _get_gce_compute()
    request = compute.images().insert(project=project, body=body)
    gce_wait_until_done(request.execute())
    return name
def reload(self, client=None):
    """Reload properties from Cloud Storage.

    If :attr:`user_project` is set, bills the API request to that project.

    :type client: :class:`~google.cloud.storage.client.Client` or
                  ``NoneType``
    :param client: the client to use. If not passed, falls back to the
                   ``client`` stored on the current object.
    """
    client = self._require_client(client)
    params = self._query_params
    # Pass only '?projection=noAcl' here because 'acl' and related
    # properties are handled via custom endpoints.
    params["projection"] = "noAcl"
    response = client._connection.api_request(
        method="GET",
        path=self.path,
        query_params=params,
        headers=self._encryption_headers(),
        _target_object=self,
    )
    self._set_properties(response)
def create(self, rate, amount, order_type, pair):
    '''Create a new order.

    :param rate: float
    :param amount: float
    :param order_type: str; set 'buy' or 'sell'
    :param pair: str; set 'btc_jpy'
    '''
    nonce = nounce()
    url = 'https://coincheck.com/api/exchange/orders'
    body = 'rate={rate}&amount={amount}&order_type={order_type}&pair={pair}'.format(
        rate=rate, amount=amount, order_type=order_type, pair=pair)
    # HMAC-SHA256 signature over nonce + url + request body.
    signature = hmac.new(
        self.secret_key.encode('utf-8'),
        (nonce + url + body).encode('utf-8'),
        hashlib.sha256).hexdigest()
    headers = {
        'ACCESS-KEY': self.access_key,
        'ACCESS-NONCE': nonce,
        'ACCESS-SIGNATURE': signature,
    }
    response = requests.post(url, headers=headers, data=body)
    return json.loads(response.text)
def _assign_uid(self, sid):
    """Assign a uid to the current object, namespaced by the given sid."""
    # IDs follow the 'task.NNNN' pattern, unique within the sid namespace.
    self._uid = ru.generate_id('task.%(item_counter)04d', ru.ID_CUSTOM,
                               namespace=sid)
def get_repository_config(namespace, config, snapshot_id):
    """Get a method configuration from the methods repository.

    Args:
        namespace (str): Methods namespace
        config (str): config name
        snapshot_id (int): snapshot_id of the method

    Swagger:
        https://api.firecloud.org/#!/Method_Repository/getMethodRepositoryConfiguration
    """
    return __get("configurations/{0}/{1}/{2}".format(namespace, config,
                                                     snapshot_id))
def paths(self):
    """Return the list of candidate configuration file paths, in order."""
    filename = '.mbed_cloud_config.json'
    candidates = [
        "/etc/%s" % filename,                             # global config in /etc for *nix users
        os.path.join(os.path.expanduser("~"), filename),  # config file in home directory
        os.path.join(os.getcwd(), filename),              # config file in current directory
        os.environ.get(self.path_from_env_key),           # path from environment variable
    ]
    return candidates
def properties_changed(self, properties, changed_properties, invalidated_properties):
    """Called when a Characteristic property has changed.

    Forwards a changed 'Value' (as bytes) to the owning device; does
    nothing when 'Value' is absent from the change set.
    """
    # Fixed: the docstring was placed after the first statement, so it was
    # a plain expression rather than the function's docstring.
    value = changed_properties.get('Value')
    if value is not None:
        self.service.device.characteristic_value_updated(
            characteristic=self, value=bytes(value))
def _compute(self, arrays, dates, assets, mask):
    """Compute the stored expression string with numexpr."""
    out = full(mask.shape, self.missing_value, dtype=self.dtype)
    inputs = {"x_%d" % i: arr for i, arr in enumerate(arrays)}
    # numexpr writes directly into the output buffer via out=.
    numexpr.evaluate(
        self._expr,
        local_dict=inputs,
        global_dict={'inf': inf},
        out=out,
    )
    return out
def by_filter(cls, session, opts, **kwargs):
    """Get packages from given filters.

    :param session: SQLAlchemy session
    :type session: :class:`sqlalchemy.Session`
    :param opts: filtering options
    :type opts: `dict`
    :return: package instances
    :rtype: generator of :class:`pyshop.models.Package`
    """
    where = []
    if opts.get('local_only'):
        # NOTE: '== True' is intentional -- SQLAlchemy overloads == to build
        # a SQL expression; do not "simplify" to a bare truthiness test.
        where.append(cls.local == True)
    if opts.get('names'):
        where.append(cls.name.in_(opts['names']))
    if opts.get('classifiers'):
        # Keep only packages matching ALL requested classifiers: count the
        # matching classifier links per package and require >= len(ids).
        ids = [c.id for c in opts.get('classifiers')]
        cls_pkg = classifier__package
        qry = session.query(cls_pkg.c.package_id, func.count('*'))
        qry = qry.filter(cls_pkg.c.classifier_id.in_(ids))
        qry = qry.group_by(cls_pkg.c.package_id)
        qry = qry.having(func.count('*') >= len(ids))
        where.append(cls.id.in_([r[0] for r in qry.all()]))
    return cls.find(session, where=where, **kwargs)
def get_catalogue(self, locale):
    """Return the message catalogue for *locale*.

    Reloads the catalogue when it is missing or when more than one
    second has passed since the last reload.
    """
    if locale is None:
        locale = self.locale
    stale = datetime.now() - self.last_reload > timedelta(seconds=1)
    if locale not in self.catalogues or stale:
        self._load_catalogue(locale)
        self.last_reload = datetime.now()
    return self.catalogues[locale]
def data(self, data):
    """Set the data array.

    :type data: numpy.ndarray
    """
    self._assert_shape(data, self._x_indexes, self._y_indexes)
    # Clamp +/- infinity to zero so min/max stay finite (NaNs are handled
    # by the nanmin/nanmax below).
    data[data == -np.inf] = 0.0
    data[data == np.inf] = 0.0
    self._data = data
    # NOTE(review): reads self.data (not the local) -- presumably this is a
    # property setter and the getter returns self._data; confirm.
    self._min_value = np.nanmin(self.data)
    self._max_value = np.nanmax(self.data)
    self._data_x_indexes = list(range(data.shape[0]))
    self._data_y_indexes = list(range(data.shape[1]))
    self._dirty = False
def reply_inform(self, connection, inform, orig_req):
    """Send an inform as part of the reply to an earlier request.

    Parameters
    ----------
    connection : ClientConnection object
        The client to send the inform to.
    inform : Message object
        The inform message to send.
    orig_req : Message object
        The request message being replied to. The inform message's
        id is overridden with the id from orig_req before the
        inform is sent.
    """
    if isinstance(connection, ClientRequestConnection):
        self._logger.warn(
            'Deprecation warning: do not use self.reply_inform() '
            'within a reply handler context -- '
            'use req.inform(*inform_arguments)\n'
            'Traceback:\n %s',
            "".join(traceback.format_stack()))
        # Unwrap to the underlying ClientConnection instance.
        connection = connection.client_connection
    connection.reply_inform(inform, orig_req)
def delete(self, path):
    """Wrap the hvac delete call, using the right token for
    cubbyhole interactions."""
    path = sanitize_mount(path)
    val = None
    if path.startswith('cubbyhole'):
        # Cubbyhole paths must be accessed with the initial token; swap it
        # in for the call and restore the operational token afterwards.
        self.token = self.initial_token
        val = super(Client, self).delete(path)
        self.token = self.operational_token
    else:
        # NOTE(review): the return value of the non-cubbyhole delete is
        # discarded, so this branch always yields None -- confirm this
        # asymmetry is intentional.
        super(Client, self).delete(path)
    return val
def make_bcbiornaseq_object(data):
    """Load the initial bcb.rda object using bcbioRNASeq."""
    if "bcbiornaseq" not in dd.get_tools_on(data):
        # Tool not enabled for this sample; nothing to do.
        return data
    upload_dir = tz.get_in(("upload", "dir"), data)
    report_dir = os.path.join(upload_dir, "bcbioRNASeq")
    safe_makedir(report_dir)
    bcb_config = dd.get_bcbiornaseq(data)
    loadstring = create_load_string(upload_dir,
                                    bcb_config.get("interesting_groups", None),
                                    bcb_config.get("organism", None))
    r_file = os.path.join(report_dir, "load_bcbioRNAseq.R")
    with file_transaction(r_file) as tmp_file:
        memoize_write_file(loadstring, tmp_file)
    rcmd = Rscript_cmd()
    with chdir(report_dir):
        do.run([rcmd, "--no-environ", r_file], "Loading bcbioRNASeq object.")
        make_quality_report(data)
    return data
def run(self, task):
    '''Runs a task and re-schedule it'''
    # Drop any finished greenlet previously registered under this name.
    self._remove_dead_greenlet(task.name)
    if isinstance(task.timer, types.GeneratorType):
        # Generator-based timer: start the task immediately, then schedule
        # the next run() after the generator's next timedelta.
        greenlet_ = gevent.spawn(task.action, *task.args, **task.kwargs)
        self.active[task.name].append(greenlet_)
        try:
            # total_seconds is available in Python 2.7
            greenlet_later = gevent.spawn_later(task.timer.next().total_seconds(), self.run, task)
            self.waiting[task.name].append(greenlet_later)
            return greenlet_, greenlet_later
        except StopIteration:
            # Timer exhausted: this was the final run.
            pass
        return greenlet_, None
    # Class based timer
    try:
        if task.timer.started is False:
            # First run: honour the timer's initial delay before starting.
            # NOTE(review): if next() raises StopIteration here, greenlet_
            # is unbound at the final return -- confirm this cannot happen.
            delay = task.timer.next().total_seconds()
            gevent.sleep(delay)
            greenlet_ = gevent.spawn(task.action, *task.args, **task.kwargs)
            self.active[task.name].append(greenlet_)
        else:
            greenlet_ = gevent.spawn(task.action, *task.args, **task.kwargs)
            self.active[task.name].append(greenlet_)
        # Schedule the next invocation of run() for this task.
        greenlet_later = gevent.spawn_later(task.timer.next().total_seconds(), self.run, task)
        self.waiting[task.name].append(greenlet_later)
        return greenlet_, greenlet_later
    except StopIteration:
        # Timer exhausted: no further runs are scheduled.
        pass
    return greenlet_, None
def spawn_callback(self, callback: Callable, *args: Any, **kwargs: Any) -> None:
    """Calls the given callback on the next IOLoop iteration.

    As of Tornado 6.0, this method is equivalent to `add_callback`.

    .. versionadded:: 4.0
    """
    # Retained as a thin alias for backwards compatibility.
    self.add_callback(callback, *args, **kwargs)
def main(arguments=None):  # suppress(unused-function)
    """Entry point for the linter.

    Parses arguments, runs the configured linters over the files
    (optionally in parallel), reports errors, and returns the error count.
    """
    result = _parse_arguments(arguments)
    linter_funcs = _ordered(linter_functions_from_filters,
                            result.whitelist,
                            result.blacklist)
    global_options = vars(result)
    tool_options = tool_options_from_global(global_options, len(result.files))
    any_would_run = _any_would_run(_run_lint_on_file_exceptions,
                                   result.files,
                                   result.stamp_file_path,
                                   result.log_technical_terms_to,
                                   linter_funcs,
                                   tool_options,
                                   result.fix_what_you_can)
    if any_would_run:
        for linter_function in linter_funcs.values():
            if linter_function.before_all:
                linter_function.before_all(global_options, tool_options)
        use_multiprocessing = _should_use_multiprocessing(len(result.files))
    else:
        use_multiprocessing = False
    if use_multiprocessing:
        mapper = parmap.map
    else:
        # suppress(E731)
        mapper = lambda f, i, *a: [f(*((x,) + a)) for x in i]
    errors = list(itertools.chain(*mapper(_run_lint_on_file_stamped,
                                          result.files,
                                          result.stamp_file_path,
                                          result.log_technical_terms_to,
                                          linter_funcs,
                                          tool_options,
                                          result.fix_what_you_can)))
    for error in sorted(errors):
        _report_lint_error(error.failure, os.path.relpath(error.absolute_path))
    if any_would_run:
        # Fixed: the loop variable previously shadowed the linter_funcs
        # dict ("for linter_funcs in linter_funcs.values()").
        for linter_function in linter_funcs.values():
            if linter_function.after_all:
                linter_function.after_all(global_options, tool_options)
    return len(errors)
def task_menu(course, task, template_helper):
    """Display the link to the scoreboards on the task page, if the plugin
    is activated for this course and the task is used in scoreboards."""
    scoreboards = course.get_descriptor().get('scoreboard', [])
    try:
        tolink = []
        for sid, scoreboard in enumerate(scoreboards):
            if task.get_id() in scoreboard["content"]:
                tolink.append((sid, scoreboard["name"]))
        if tolink:
            return str(template_helper.get_custom_renderer(
                'frontend/plugins/scoreboard',
                layout=False).task_menu(course, tolink))
        return None
    except Exception:
        # Best-effort: a malformed scoreboard config must not break the
        # task page. (Was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt.)
        return None
def force_start(self, infohash_list, value=True):
    """Force start selected torrents.

    :param infohash_list: Single or list() of infohashes.
    :param value: Force start value (bool)
    """
    data = self._process_infohash_list(infohash_list)
    # The API expects the boolean serialized as JSON ('true'/'false').
    data['value'] = json.dumps(value)
    return self._post('command/setForceStart', data=data)
def get_all_unresolved(self):
    """Return a set of all unresolved imports across broken deps."""
    assert self.final, 'Call build() before using the graph.'
    unresolved = set()
    for dep_set in self.broken_deps.values():
        unresolved.update(dep_set)
    return unresolved
def _build_master(cls):
    """Prepare the master working set."""
    ws = cls()
    try:
        from __main__ import __requires__
    except ImportError:
        # The main program does not list any requirements
        return ws
    # ensure the requirements are met
    try:
        ws.require(__requires__)
    except VersionConflict:
        # Fall back to a working set built strictly from the declared
        # requirements when the default set conflicts.
        return cls._build_from_requirements(__requires__)
    return ws
def verify(self, public_pair, val, sig):
    """Check an ECDSA signature.

    :param: public_pair: a :class:`Point <pycoin.ecdsa.Point.Point>` on the curve
    :param: val: an integer value
    :param: sig: a pair of integers ``(r, s)`` representing an ecdsa signature

    :returns: True if and only if the signature ``sig`` is a valid signature
        of ``val`` using the ``public_pair`` public key.
    """
    order = self._order
    r, s = sig
    # Both signature components must lie in [1, order - 1].
    if not (1 <= r < order and 1 <= s < order):
        return False
    s_inverse = self.inverse(s)
    u1 = val * s_inverse
    u2 = r * s_inverse
    point = u1 * self + u2 * self.Point(*public_pair)
    return point[0] % order == r
def convert(self, inp):
    """Convert a string representation of some quantity of units into a
    quantities object.

    Args:
        inp (str): A textual representation of some quantity of units,
            e.g., "fifty kilograms".

    Returns:
        A quantities object representing the described quantity and its
        units.
    """
    cleaned = self._preprocess(inp)
    magnitude = NumberService().longestNumber(cleaned)
    units = self.extractUnits(cleaned)
    # Build the quantity in the source unit, then convert to the target.
    quantity = pq.Quantity(float(magnitude), units[0])
    quantity.units = units[1]
    return quantity
def sign(self, payload):
    """Sign *payload* using the configured authenticator.

    Returns the payload unchanged when no authenticator is set.
    """
    if not self.authenticator:
        return payload
    return self.authenticator.signed(payload)
def alter_change_column(self, table, column, field):
    """Support change columns: rebuild the column from the new field."""
    # The combiner keeps only the newly generated column definition.
    return self._update_column(table, column, lambda old, new: new)
def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d():
    """Big 2d model for unconditional generation on imagenet."""
    hparams = image_transformer2d_base()
    hparams.unconditional = True
    hparams.hidden_size = 512
    hparams.batch_size = 1
    hparams.img_len = 64
    hparams.num_heads = 8
    hparams.filter_size = 2048
    # Removed dead duplicate assignments: batch_size was set to 1 twice and
    # max_length was set to 3075 and then immediately overwritten by 14000.
    hparams.max_length = 14000
    hparams.layer_preprocess_sequence = "none"
    hparams.layer_postprocess_sequence = "dan"
    hparams.layer_prepostprocess_dropout = 0.1
    hparams.dec_attention_type = cia.AttentionType.LOCAL_2D
    hparams.query_shape = (16, 16)
    hparams.memory_flange = (8, 8)
    return hparams
def get_tensor_num_entries(self, tensor_name, partial_layout=None,
                           mesh_dimension_to_size=None):
    """The number of entries in a tensor.

    If partial_layout is specified, then mesh_dimension_to_size must also
    be. In this case, the number of entries on a single device is returned.

    Args:
        tensor_name: a string, name of a tensor in the graph.
        partial_layout: an optional {string: string}, from MTF dimension
            name to mesh dimension name.
        mesh_dimension_to_size: an optional {string: int}, from mesh
            dimension name to size.

    Returns:
        an integer
    """
    shape = self.get_tensor_shape(tensor_name)
    total = 1
    for dim in shape.dims:
        total *= dim.value
    if not partial_layout:
        return total
    # Divide out every MTF dimension that is split across a mesh dimension.
    # Mesh TensorFlow only allows evenly divisible assignments, so there is
    # no divisibility corner case to worry about here.
    for mtf_name in self.get_tensor_mtf_dimension_names(tensor_name):
        if mtf_name not in partial_layout:
            continue
        divisor = mesh_dimension_to_size[partial_layout[mtf_name]]
        total = int(math.ceil(total / divisor))
    return total
def get_feature_variable_boolean(self, feature_key, variable_key, user_id,
                                 attributes=None):
    """Returns value for a certain boolean variable attached to a feature flag.

    Args:
        feature_key: Key of the feature whose variable's value is being accessed.
        variable_key: Key of the variable whose value is to be accessed.
        user_id: ID for user.
        attributes: Dict representing user attributes.

    Returns:
        Boolean value of the variable. None if:
            - Feature key is invalid.
            - Variable key is invalid.
            - Mismatch with type of variable.
    """
    # Delegate to the shared typed-variable lookup with the boolean type.
    return self._get_feature_variable_for_type(
        feature_key, variable_key, entities.Variable.Type.BOOLEAN,
        user_id, attributes)
def get_db_references(cls, entry):
    """Get list of `models.DbReference` from XML node entry.

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.DbReference`
    """
    # One DbReference per <dbReference> child, mapping XML attributes to
    # the model's keyword arguments.
    return [
        models.DbReference(identifier=ref.attrib['id'],
                           type_=ref.attrib['type'])
        for ref in entry.iterfind("./dbReference")
    ]
def _check_hetcaller(item):
    """Ensure upstream SV callers required for heterogeneity analysis are available.

    Heterogeneity analysis (explicit ``hetcaller`` entries, or the
    ``titancna``/``purecn`` SV callers) consumes CNV calls, so one of the
    supported CNV callers must also be configured in ``svcaller``.

    Raises:
        ValueError: if heterogeneity analysis is requested without a
            supported CNV caller.
    """
    svs = _get_as_list(item, "svcaller")
    hets = _get_as_list(item, "hetcaller")
    needs_cnv = hets or any(x in svs for x in ["titancna", "purecn"])
    if needs_cnv and not any(x in svs for x in ["cnvkit", "gatk-cnv"]):
        # The check accepts `gatk-cnv`; the message previously advertised
        # `gatk4-cnv`, which would not satisfy this check.
        raise ValueError("Heterogeneity caller used but need CNV calls. Add `gatk-cnv` "
                         "or `cnvkit` to `svcaller` in sample: %s" % item["description"])
def analyze(self, count):
    """Analyze count data from :meth:`PDFHistogram.count`.

    Turns an array of counts (see :meth:`PDFHistogram.count`) into a
    histogram of probabilities, and estimates the mean, standard
    deviation, and other statistical characteristics of the corresponding
    probability distribution.

    Args:
        count (array): Array of length ``nbin + 2`` containing histogram
            data where ``count[0]`` is the count for values that are
            below the range of the histogram, ``count[-1]`` is the count
            for values above the range, and ``count[i]`` is the count
            for the ``i``-th bin where ``i = 1 ... nbin``. An array of
            length ``nbin`` is also accepted and is treated as
            already-normalized per-bin data.

    Returns a named tuple containing the following information (in order):
        *bins*: Array of bin edges for histogram (length ``nbin + 1``)
        *prob*: Array of probabilities for each bin.
        *stats*: Statistical data about histogram. See :class:`PDFStatistics`.
        *norm*: Convert counts into probabilities by dividing by ``norm``.

    Raises:
        ValueError: If ``count`` is not one-dimensional or has a length
            that matches neither ``nbin`` nor ``nbin + 2``.
    """
    if numpy.ndim(count) != 1:
        raise ValueError('count must have dimension 1')
    if len(count) == len(self.midpoints) + 2:
        # Full histogram including under/overflow entries: normalize by
        # the total count and strip the two out-of-range entries.
        norm = numpy.sum(count)
        data = numpy.asarray(count[1:-1]) / norm
    elif len(count) != len(self.midpoints):
        raise ValueError('wrong data length: %s != %s' % (len(count), len(self.midpoints)))
    else:
        # Length matches the bins exactly; treat as pre-normalized data.
        data = count
        norm = 1.
    # NOTE: the original also bound `mid = self.midpoints` here, but the
    # value was never used; the dead assignment has been removed.
    stats = PDFStatistics(histogram=(self.bins, count))
    return PDFHistogram.Histogram(self.bins, data, stats, norm)
def save_macros(self, filepath, macros):
    """Saves macros to file.

    Writes to a temporary file (``filepath + "~"``) first and then renames
    it over the target, so an aborted save cannot destroy the previous
    macro file.

    Parameters
    ----------
    filepath: String
    \tPath to macro file
    macros: String
    \tMacro code

    Returns ``False`` on write failure; falls through (returning ``None``)
    on success.
    """
    io_error_text = _("Error writing to file {filepath}.")
    io_error_text = io_error_text.format(filepath=filepath)
    # Make sure that old macro file does not get lost on abort save
    tmpfile = filepath + "~"
    try:
        wx.BeginBusyCursor()
        # Disable the grid so the user cannot edit while the save runs.
        self.main_window.grid.Disable()
        with open(tmpfile, "w") as macro_outfile:
            macro_outfile.write(macros)
        # Move save file from temp file to filepath
        try:
            os.rename(tmpfile, filepath)
        except OSError:  # No tmp file present
            pass
    except IOError:
        # Report the failure via the status bar, if the UI still exists.
        try:
            post_command_event(self.main_window, self.StatusBarMsg,
                               text=io_error_text)
        except TypeError:  # The main window does not exist any more
            pass
        return False
    finally:
        # Always restore the UI state, whether or not the save succeeded.
        self.main_window.grid.Enable()
        wx.EndBusyCursor()
def zoom_to_ligand(self):
    """Zoom in to the ligand and its interactions (PyMOL camera setup)."""
    cmd.center(self.ligname)
    cmd.orient(self.ligname)
    cmd.turn('x', 110)
    # If the ligand is aligned with the longest axis, aromatic rings are hidden
    if 'AllBSRes' in cmd.get_names("selections"):
        # Include the binding-site residue selection in the zoom, with
        # a 3 Angstrom buffer.
        cmd.zoom('%s or AllBSRes' % self.ligname, 3)
    else:
        if self.object_exists(self.ligname):
            cmd.zoom(self.ligname, 3)
    # Set the rotation origin to the ligand for subsequent camera moves.
    cmd.origin(self.ligname)
def hasScoreBetterThan(self, score):
    """Is there an HSP with a score better than a given value?

    @return: A C{bool}, C{True} if there is at least one HSP in the
        alignments for this title with a score better than C{score}.
    """
    # Note: Do not assume that HSPs in an alignment are sorted in
    # decreasing order (as they are in BLAST output). If we could assume
    # that, we could just check the first HSP in each alignment.
    return any(hsp.betterThan(score) for hsp in self.hsps())
def start_stop_video(self):
    """Start and stop the video, and change the button.

    Toggles playback based on the button's current label: a 'Start' label
    attempts to start the video, a 'Stop' label stops playback. Does
    nothing (beyond a status-bar message) when no dataset is loaded.
    """
    if self.parent.info.dataset is None:
        self.parent.statusBar().showMessage('No Dataset Loaded')
        return
    # & is added automatically by PyQt, it seems
    if 'Start' in self.idx_button.text().replace('&', ''):
        try:
            self.update_video()
        except IndexError as er:
            # No video segment for the current position; keep 'Start' in
            # the label so the user can retry elsewhere.
            lg.debug(er)
            self.idx_button.setText('Not Available / Start')
            return
        except OSError as er:
            # Video file missing or unreadable for this dataset.
            lg.debug(er)
            self.idx_button.setText('NO VIDEO for this dataset')
            return
        self.idx_button.setText('Stop')
    elif 'Stop' in self.idx_button.text():
        self.idx_button.setText('Start')
        self.medialistplayer.stop()
        self.t.stop()
def smallest_pair_diff(pairs_list):
    """Find the smallest difference in the pairs of the given list of tuples.

    Examples:
        smallest_pair_diff([(3, 5), (1, 7), (10, 3), (1, 2)]) -> 1
        smallest_pair_diff([(4, 6), (12, 8), (11, 4), (2, 13)]) -> 2
        smallest_pair_diff([(5, 17), (3, 9), (12, 5), (3, 24)]) -> 6

    Args:
        pairs_list: A list of tuples.

    Returns:
        The smallest difference between the elements of the tuples in the
        list.
    """
    return min(abs(first - second) for first, second in pairs_list)
def sobol(N, dim, scrambled=1):
    """Sobol sequence.

    Parameters
    ----------
    N : int
        length of sequence
    dim : int
        dimension
    scrambled : int
        which scrambling method to use:
            + 0: no scrambling
            + 1: Owen's scrambling
            + 2: Faure-Tezuka
            + 3: Owen + Faure-Tezuka

    Returns
    -------
    (N, dim) numpy array.

    Notes
    -----
    For scrambling, seed is set randomly. Fun fact: this venerable but
    playful piece of Fortran code occasionally returns numbers above 1
    (i.e. for a very small number of seeds); when this happens we just
    start over (since the seed is randomly generated).
    """
    while True:
        seed = np.random.randint(2 ** 32)
        sample = lowdiscrepancy.sobol(N, dim, scrambled, seed, 1, 0)
        if scrambled == 0:
            # No scrambling: values are always in range, accept directly.
            return sample
        if ((sample > 0.) & (sample < 1.)).all():
            return sample
        # Out-of-range draw from the Fortran code; retry with a new seed.
def write_data(self, buf):
    """Send data to the device.

    If the write fails for any reason, an :obj:`IOError` exception
    is raised.

    :param buf: the data to send.
    :type buf: list(int)
    :return: success status.
    :rtype: bool
    """
    # Python 2 control transfers take a str payload; Python 3 takes bytes.
    if sys.version_info[0] < 3:
        payload = ''.join(chr(b) for b in buf)
    else:
        payload = bytes(buf)
    written = self.dev.controlWrite(
        libusb1.LIBUSB_ENDPOINT_OUT | libusb1.LIBUSB_TYPE_CLASS |
        libusb1.LIBUSB_RECIPIENT_INTERFACE,
        libusb1.LIBUSB_REQUEST_SET_CONFIGURATION,
        0x200, 0, payload, timeout=50)
    if written != len(buf):
        raise IOError('pywws.device_libusb1.USBDevice.write_data failed')
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.