| signature | implementation |
|---|---|
def allow_migrate(self, db, model):
    """Route the legacy ``oldimporter`` app to the ``old_nodeshot`` database.

    Allows a migration only for ``oldimporter`` models on the
    ``old_nodeshot`` connection; every other combination is rejected.
    """
    return db == 'old_nodeshot' and model._meta.app_label == 'oldimporter'
def _suffix_rules ( token , tag = "NN" ) :
"""Default morphological tagging rules for English , based on word suffixes .""" | if isinstance ( token , ( list , tuple ) ) :
token , tag = token
if token . endswith ( "ing" ) :
tag = "VBG"
if token . endswith ( "ly" ) :
tag = "RB"
if token . endswith ( "s" ) and not token . endswith ( ( "is" , "ous" , "ss" ) ) :
tag = "NNS"
if token . endswith ( ( "able" , "al" , "ful" , "ible" , "ient" , "ish" , "ive" , "less" , "tic" , "ous" ) ) or "-" in token :
tag = "JJ"
if token . endswith ( "ed" ) :
tag = "VBN"
if token . endswith ( ( "ate" , "ify" , "ise" , "ize" ) ) :
tag = "VBP"
return [ token , tag ] |
def get_context(self, name, value, attrs):
    """Build the template context for this widget.

    Extends the base widget context with two extra entries under
    ``context['widget']``:

    * ``value_s`` -- ``str(value)``, e.g. ``"Tuesdays"``
    * ``value_r`` -- ``repr(value)``, e.g.
      ``"DTSTART:20181201\\nRRULE:FREQ=WEEKLY;WKST=SU;BYDAY=TU"``
    """
    context = super().get_context(name, value, attrs)
    widget = context['widget']
    widget['value_s'] = str(value)
    widget['value_r'] = repr(value)
    return context
def _write_validate(self):
    """In addition to constructor validation steps, run validation steps
    for writing."""
    # A colr box must carry an enumerated colorspace to be written.
    if self.colorspace is None:
        msg = ("Writing colr boxes without enumerated "
               "colorspaces is not supported at this time.")
        self._dispatch_validation_error(msg, writing=True)
    # When there is no ICC profile, the enumerated colorspace must be one
    # of the supported values.
    if self.icc_profile is None:
        if self.colorspace not in [SRGB, GREYSCALE, YCC]:
            msg = ("Colorspace should correspond to one of SRGB, "
                   "GREYSCALE, or YCC.")
            self._dispatch_validation_error(msg, writing=True)
    # Re-run constructor-time validation in "writing" mode as well.
    self._validate(writing=True)
def build_for_each(self, db, safe_mode=False, extra=None):
    """Build the for-each context: one query result per declared variable."""
    return {
        var: db.query(query, additional_locals=extra, safe_mode=safe_mode)
        for var, query in iteritems(self.for_each)
    }
def remove_this_tlink(self, tlink_id):
    """Remove the tlink with the given identifier.

    @type tlink_id: string
    @param tlink_id: the tlink identifier to be removed

    Only the first tlink whose id matches is removed; no-op when no
    tlink matches.
    """
    matching = (t for t in self.get_tlinks() if t.get_id() == tlink_id)
    found = next(matching, None)
    if found is not None:
        self.node.remove(found.get_node())
def _Parse(self):
    """Extracts sections and volumes from the volume system."""
    root_file_entry = self._file_system.GetRootFileEntry()
    tsk_volume = self._file_system.GetTSKVolume()
    self.bytes_per_sector = tsk_partition.TSKVolumeGetBytesPerSector(tsk_volume)
    for sub_file_entry in root_file_entry.sub_file_entries:
        tsk_vs_part = sub_file_entry.GetTSKVsPart()
        start_sector = tsk_partition.TSKVsPartGetStartSector(tsk_vs_part)
        number_of_sectors = tsk_partition.TSKVsPartGetNumberOfSectors(tsk_vs_part)
        # Skip partition entries whose extents cannot be determined.
        if start_sector is None or number_of_sectors is None:
            continue
        # Only allocated partitions become volumes; every partition entry
        # still contributes a section extent below.
        if tsk_partition.TSKVsPartIsAllocated(tsk_vs_part):
            volume = TSKVolume(sub_file_entry, self.bytes_per_sector)
            self._AddVolume(volume)
        # Extents are expressed in bytes, hence the sector-size scaling.
        volume_extent = volume_system.VolumeExtent(
            start_sector * self.bytes_per_sector,
            number_of_sectors * self.bytes_per_sector)
        self._sections.append(volume_extent)
def map_leafs(func, mapping):
    """Map a function over the leafs of a (possibly nested) mapping.

    :param func: callable invoked as ``func(value, path=[key, ...])`` for
        every non-mapping leaf value; its return value replaces the leaf.
    :param mapping: the input mapping; it is deep-copied, never mutated.
    :return: a new mapping of the same shape with transformed leaf values.
    """
    # Local import: ``collections.Mapping`` (used previously) was removed
    # in Python 3.10; the ABC lives in ``collections.abc``.
    import collections.abc

    def _inner(node, path=None):
        if path is None:
            path = []
        for key, val in node.items():
            if isinstance(val, collections.abc.Mapping):
                _inner(val, path + [key])
            else:
                node[key] = func(val, path=path + [key])
        return node

    return _inner(copy.deepcopy(mapping))
def load_post(wp_post_id):
    """Called from load_post_webhook.

    Builds a generic WPAPILoader and uses its load_post() to insert/update
    content for the post.

    :param wp_post_id: the WordPress post ID to load
    :return: None
    """
    # Give the WordPress REST API a moment to catch up before fetching.
    time.sleep(1)
    post = WPAPILoader().load_post(wp_post_id)
    if not post:
        logger.warning("Error loading post wp_post_id=%s", wp_post_id)
    else:
        logger.info("Successfully loaded post wp_post_id=%s, pk=%s", wp_post_id, post.pk)
def resource_tags(self):
    """Resource tags for this processing logic.

    Tags differentiate resources with different physical characteristics
    or logical uses (e.g. a host with external connectivity may be tagged
    ``ingest``).  Processing logic associated with one or more tags must
    run on resources carrying all of those tags.

    A stream not created directly from a Python callable (e.g. a union of
    streams) cannot be tagged; in that case an immutable empty
    ``frozenset`` is returned.

    Returns:
        set: Set of resource tags, initially empty.

    .. warning:: If no resources exist with the required tags then job
        submission will fail.
    .. versionadded:: 1.7
    .. versionadded:: 1.9 Support for :py:class:`Sink` and
        :py:class:`~streamsx.spl.op.Invoke`.
    """
    try:
        placement = self._op()._placement
        # Lazily create the mutable tag set on first access.
        return placement.setdefault('resourceTags', set())
    except TypeError:
        # No underlying operator to attach tags to.
        return frozenset()
def _get_coord_axis_map(self, ds):
    '''Returns a dictionary mapping each coordinate to a letter identifier
    describing the _kind_ of coordinate.

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: dict
    :return: A dictionary with variable names mapped to axis abbreviations,
             i.e. {'longitude': 'X', ... 'pressure': 'Z'}
    '''
    expected = ['T', 'Z', 'Y', 'X']
    coord_vars = self._find_coord_vars(ds)
    coord_axis_map = {}
    # Identifier legend:
    # L - Unlimited Coordinates
    # T - Time coordinates
    # Z - Depth/Altitude Coordinate
    # Y - Y-Coordinate (latitude)
    # X - X-Coordinate (longitude)
    # A - Auxiliary Coordinate
    # I - Instance Coordinate
    # C - Compression coordinate
    # U - Unknown coordinate or dimension
    time_variables = cfutil.get_time_variables(ds)
    lat_variables = cfutil.get_latitude_variables(ds)
    lon_variables = cfutil.get_longitude_variables(ds)
    z_variables = cfutil.get_z_variables(ds)
    for coord_name in coord_vars:
        coord_var = ds.variables[coord_name]
        axis = getattr(coord_var, 'axis', None)
        standard_name = getattr(coord_var, 'standard_name', None)
        # Unlimited dimensions must come first
        if ds.dimensions[coord_name].isunlimited():
            coord_axis_map[coord_name] = 'L'
        # axis takes precedence over standard_name
        elif axis in expected:
            coord_axis_map[coord_name] = axis
        # then standard_name, then cfutil's name/units-based detection
        elif standard_name == 'time':
            coord_axis_map[coord_name] = 'T'
        elif standard_name == 'longitude':
            coord_axis_map[coord_name] = 'X'
        elif standard_name == 'latitude':
            coord_axis_map[coord_name] = 'Y'
        elif standard_name in ['height', 'depth', 'altitude']:
            coord_axis_map[coord_name] = 'Z'
        elif cfutil.is_compression_coordinate(ds, coord_name):
            coord_axis_map[coord_name] = 'C'
        elif coord_name in time_variables:
            coord_axis_map[coord_name] = 'T'
        elif coord_name in z_variables:
            coord_axis_map[coord_name] = 'Z'
        elif coord_name in lat_variables:
            coord_axis_map[coord_name] = 'Y'
        elif coord_name in lon_variables:
            coord_axis_map[coord_name] = 'X'
        else:
            # mark the coordinate variable as unknown
            coord_axis_map[coord_name] = 'U'
    for dimension in self._get_instance_dimensions(ds):
        if dimension not in coord_axis_map:
            coord_axis_map[dimension] = 'I'
    # Dimensions of auxiliary coordinate variables will be marked with A.
    # This is useful to help determine if the dimensions are used like a
    # mapping from grid coordinates to physical lat/lon
    for coord_name in self._find_aux_coord_vars(ds):
        coord_var = ds.variables[coord_name]
        # Skip label auxiliary coordinates
        if coord_var.dtype.char == 'S':
            continue
        for dimension in coord_var.dimensions:
            if dimension not in coord_axis_map:
                coord_axis_map[dimension] = 'A'
    # If a dimension does not have a coordinate variable mark it as unknown
    for dimension in ds.dimensions:
        if dimension not in coord_axis_map:
            coord_axis_map[dimension] = 'U'
    return coord_axis_map
def run(cmd, shell=False, cwd=None, log_in_real_time=True, check_returncode=True, callback=None,):
    """Run 'cmd' in a shell and return the combined contents of stdout and
    stderr (Blocking).  Throws an exception if the command exits non-zero.

    :param cmd: list of str (or single str, if shell==True) indicating
        the command to run
    :param shell: boolean indicating whether we are using advanced shell
        features. Use only when absolutely necessary, since this allows a
        lot more freedom which could be exploited by malicious code. See
        the warning here:
        http://docs.python.org/library/subprocess.html#popen-constructor
    :param cwd: dir command is run from.
    :type cwd: str
    :param log_in_real_time: boolean indicating whether to read stdout
        from the subprocess in real time instead of when the process
        finishes.
    :param check_returncode: Indicate whether a :exc:`~exc.CommandError`
        should be raised if return code is different from 0.
    :type check_returncode: :class:`bool`
    :param callback: callback to return output as a command executes,
        accepts a function signature of ``(output, timestamp)``.
        Example usage::

            def progress_cb(output, timestamp):
                sys.stdout.write(output)
                sys.stdout.flush()
            run(['git', 'pull'], callback=progress_cb)

    :type callback: func
    """
    proc = subprocess.Popen(cmd, shell=shell, stderr=subprocess.PIPE, stdout=subprocess.PIPE, creationflags=0, bufsize=1, cwd=cwd,)
    all_output = []
    code = None
    line = None
    # Poll until the process exits, streaming stderr chunks to the
    # callback as they arrive (progress output, e.g. from git, is
    # written to stderr).
    while code is None:
        code = proc.poll()
        # output = console_to_str(proc.stdout.readline())
        # all_output.append(output)
        if callback and callable(callback):
            line = console_to_str(proc.stderr.read(128))
            if line:
                callback(output=line, timestamp=datetime.datetime.now())
    if callback and callable(callback):
        # Final carriage return so progress output ends cleanly.
        callback(output='\r', timestamp=datetime.datetime.now())
    # Collect remaining stdout, dropping blank lines.
    lines = filter(None, (line.strip() for line in proc.stdout.readlines()))
    all_output = console_to_str(b'\n'.join(lines))
    if code:
        # On failure, report stderr instead of stdout.
        stderr_lines = filter(None, (line.strip() for line in proc.stderr.readlines()))
        all_output = console_to_str(b''.join(stderr_lines))
    output = ''.join(all_output)
    if code != 0 and check_returncode:
        raise exc.CommandError(output=output, returncode=code, cmd=cmd)
    return output
def on_get(self, request, response, user_id=None):
    """Responds to GET request for users."""
    response.body = "{}"
    if not self.handler(user_id):
        response.status = falcon.HTTP_404
        return
    response.status = falcon.HTTP_200
    self.api.register(utils.mxid2localpart(user_id))
def _flatten_dict ( original_dict ) :
"""Flatten dict of dicts into a single dict with appropriate prefixes .
Handles only 2 levels of nesting in the original dict .
Args :
original _ dict : Dict which may contain one or more dicts .
Returns :
flat _ dict : Dict without any nesting . Any dicts in the original dict have
their keys as prefixes in the new dict .
Raises :
ValueError if the original dict has more than two levels of nesting .""" | flat_dict = { }
for key , value in original_dict . items ( ) :
if isinstance ( value , dict ) :
for name , tensor in value . items ( ) :
if isinstance ( tensor , dict ) :
raise ValueError ( "flatten_dict only handles 2 levels of nesting." )
flat_key = "__" + key + "_" + name
flat_dict [ flat_key ] = tensor
else :
flat_dict [ key ] = value
return flat_dict |
def search_ss_bonds(self, threshold=3.0):
    """Search S-S bonds based on distances between atoms (first model only).

    Average S-S distance is 2.05A; the default threshold is 3A.

    :param threshold: maximum SG-SG distance for a pair to be yielded.
    :return: iterator of (residue, residue) tuples of CYS residues whose
        SG atoms are closer than ``threshold``.
    """
    # The original carried a hand-rolled Python 2.4-compatible copy of
    # itertools.combinations; the stdlib version (2.6+) replaces it.
    from itertools import combinations
    model = self.child_list[0]
    cysteines = [r for r in model.get_residues() if r.get_resname() == 'CYS']
    for cys_pair in combinations(cysteines, 2):
        # Atom subtraction yields the inter-atomic distance.
        if cys_pair[0]['SG'] - cys_pair[1]['SG'] < threshold:
            yield cys_pair
def get_energy_relax_structure_buckingham(structure, gulp_cmd='gulp', keywords=('optimise', 'conp'), valence_dict=None):
    """Relax a structure and compute the energy using Buckingham potential.

    Args:
        structure: pymatgen.core.structure.Structure
        gulp_cmd: GULP command if not in standard place
        keywords: GULP first line keywords
        valence_dict: {El: valence}.  Needed if the structure is not
            charge neutral.
    """
    io = GulpIO()
    caller = GulpCaller(gulp_cmd)
    gulp_input = io.buckingham_input(structure, keywords, valence_dict=valence_dict)
    gulp_output = caller.run(gulp_input)
    return io.get_energy(gulp_output), io.get_relaxed_structure(gulp_output)
def _reset_annot_refs ( self ) :
"""Invalidate / delete all annots of this page .""" | for annot in self . _annot_refs . values ( ) :
if annot :
annot . _erase ( )
self . _annot_refs . clear ( ) |
def extractGlobalParameters(self, dna, bp, frames=None, paxis='Z', masked=False):
    """Extract the parameters for calculations.

    .. currentmodule:: dnaMD

    Parameters
    ----------
    dna : :class:`dnaMD.DNA`
        Input :class:`dnaMD.DNA` instance.
    bp : list
        List of two base-steps forming the DNA segment.
        For example: with ``bp=[5, 50]``, the 5-50 base-step segment will
        be considered.
    frames : list
        List of two trajectory frames between which parameters will be
        extracted.  For example, with ``frames=[100, 1000]``, the 100th to
        1000th frames of the trajectory will be considered.
    paxis : str
        Axis parallel to global helical-axis (``'X'``, ``'Y'`` or
        ``'Z'``).  Only required when bending motions are included in the
        calculation.
    masked : bool
        ``Default=False``.  To skip specific frames/snapshots;
        ``DNA.mask`` array should be set to use this functionality.

    Returns
    -------
    time : numpy.ndarray
        1D numpy array of shape (nframes) containing time.
    array : numpy.ndarray
        2D numpy array of shape (parameters count, nframes) containing
        the extracted parameters.
    """
    frames = self._validateFrames(frames)
    # A -1 end frame means "up to the last frame": use an open slice.
    if frames[1] == -1:
        frames[1] = None
    if (len(bp) != 2):
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")
    if bp[0] > bp[1]:
        raise ValueError("bp should be a list containing first and last bp of a segment. See, documentation!!!")
    # Contour length: summed helical rise over the segment.
    time, clen = dna.time_vs_parameter('h-rise', bp=bp, merge=True, merge_method='sum', masked=masked)
    clen = np.asarray(clen) * 0.1
    # conversion to nm
    time, htwist = dna.time_vs_parameter('h-twist', bp=bp, merge=True, merge_method='sum', masked=masked)
    htwist = np.deg2rad(htwist)
    # Conversion to radian
    angleOne, angleTwo = None, None
    if self.esType == 'BST':
        # Bending included: also extract the two bending angles.
        angleOne, angleTwo = dna.calculate_2D_angles_bw_tangents(paxis, bp, masked=masked)
        # Rarely there are nan during angle calculation, remove those nan
        nanInOne = np.isnan(angleOne[frames[0]:frames[1]])
        nanInTwo = np.isnan(angleTwo[frames[0]:frames[1]])
        notNan = ~(nanInOne + nanInTwo)
        notNanIdx = np.nonzero(notNan)
        array = np.array([angleOne[frames[0]:frames[1]][notNanIdx], angleTwo[frames[0]:frames[1]][notNanIdx], clen[frames[0]:frames[1]][notNanIdx], htwist[frames[0]:frames[1]][notNanIdx]])
        time = (time[frames[0]:frames[1]])[notNanIdx]
    else:
        array = np.array([clen[frames[0]:frames[1]], htwist[frames[0]:frames[1]]])
        time = time[frames[0]:frames[1]]
    return time, array
def _construct_axes_dict_for_slice ( self , axes = None , ** kwargs ) :
"""Return an axes dictionary for myself .""" | d = { self . _AXIS_SLICEMAP [ a ] : self . _get_axis ( a ) for a in ( axes or self . _AXIS_ORDERS ) }
d . update ( kwargs )
return d |
def _script_to_har_entry(cls, script, url):
    '''Return a HAR entry wrapping an inline/embedded script.'''
    entry = {
        'request': {'url': url},
        'response': {'url': url, 'content': {'text': script}},
    }
    cls._set_entry_type(entry, INLINE_SCRIPT_ENTRY)
    return entry
def remove_port_channel ( self , ** kwargs ) :
"""Remove a port channel interface .
Args :
port _ int ( str ) : port - channel number ( 1 , 2 , 3 , etc ) .
callback ( function ) : A function executed upon completion of the
method . The only parameter passed to ` callback ` will be the
` ` ElementTree ` ` ` config ` .
Returns :
Return value of ` callback ` .
Raises :
KeyError : if ` port _ int ` is not passed .
ValueError : if ` port _ int ` is invalid .
Examples :
> > > import pynos . device
> > > switches = [ ' 10.24.39.211 ' , ' 10.24.39.203 ' ]
> > > auth = ( ' admin ' , ' password ' )
> > > for switch in switches :
. . . conn = ( switch , ' 22 ' )
. . . with pynos . device . Device ( conn = conn , auth = auth ) as dev :
. . . output = dev . interface . channel _ group ( name = ' 225/0/20 ' ,
. . . int _ type = ' tengigabitethernet ' ,
. . . port _ int = ' 1 ' , channel _ type = ' standard ' , mode = ' active ' )
. . . output = dev . interface . remove _ port _ channel (
. . . port _ int = ' 1 ' )""" | port_int = kwargs . pop ( 'port_int' )
callback = kwargs . pop ( 'callback' , self . _callback )
if re . search ( '^[0-9]{1,4}$' , port_int ) is None :
raise ValueError ( '%s must be in the format of x for port channel ' 'interfaces.' % repr ( port_int ) )
port_channel = getattr ( self . _interface , 'interface_port_channel_name' )
port_channel_args = dict ( name = port_int )
config = port_channel ( ** port_channel_args )
delete_channel = config . find ( './/*port-channel' )
delete_channel . set ( 'operation' , 'delete' )
return callback ( config ) |
def _addNoise ( self , pattern , noiseLevel ) :
"""Adds noise the given list of patterns and returns a list of noisy copies .""" | if pattern is None :
return None
newBits = [ ]
for bit in pattern :
if random . random ( ) < noiseLevel :
newBits . append ( random . randint ( 0 , max ( pattern ) ) )
else :
newBits . append ( bit )
return set ( newBits ) |
def find_matching_endpoints(self, swagger_ns):
    """Compute current matching endpoints.

    Evaluated as a property to defer evaluation.
    """
    def match_func(operation, ns, rule):
        # Only expose endpoints under the swagger path prefix whose
        # operation is one of the matching operations.
        route_prefix = self.graph.build_route_path(swagger_ns.path, swagger_ns.prefix)
        if not rule.rule.startswith(route_prefix):
            return False
        return operation in self.matching_operations

    return list(iter_endpoints(self.graph, match_func))
def get_model_home():
    '''Return the root folder path for downloading models.

    Creates the directory on first use.
    '''
    d = os.path.join(get_data_home(), 'nnp_models')
    # exist_ok avoids the check-then-create race of the previous
    # ``isdir`` + ``makedirs`` pair when two processes start at once.
    os.makedirs(d, exist_ok=True)
    return d
def detect_version():
    """Parse the output of ``psql --version`` to detect the Postgres version.

    Returns the version as ``"major.minor"``.  Aborts when the version
    cannot be determined.
    """
    # Accept multi-digit components and an optional bugfix part:
    # Postgres >= 10 reports two-part versions ("psql (PostgreSQL) 12.3")
    # and majors can exceed a single digit, which the previous
    # single-\d pattern failed to match.
    version_regex = re.compile(
        r'\(PostgreSQL\) (?P<major>\d+)\.(?P<minor>\d+)(?:\.(?P<bugfix>\d+))?')
    pg_version = None
    with hide('running', 'stdout', 'stderr'):
        output = run('psql --version')
    match = version_regex.search(output)
    if match:
        result = match.groupdict()
        if 'major' in result and 'minor' in result:
            pg_version = u'%(major)s.%(minor)s' % result
    if not pg_version:
        abort(u"Error: Could not determine Postgres version of the server.")
    return pg_version
def get_authorize_url(self, state, scope='identity', refreshable=False):
    """Return the URL to send the user to for OAuth2 authorization.

    :param state: a unique string of your choice that represents this
        individual client
    :param scope: the reddit scope to ask permissions for.  Multiple
        scopes can be enabled by passing in a container of strings.
    :param refreshable: when True, a permanent "refreshable" token is
        issued
    """
    duration = 'permanent' if refreshable else 'temporary'
    params = {
        'client_id': self.client_id,
        'response_type': 'code',
        'redirect_uri': self.redirect_uri,
        'state': state,
        'scope': _to_reddit_list(scope),
        'duration': duration,
    }
    request = Request('GET', self.config['authorize'], params=params)
    return request.prepare().url
def append_tz_timestamp(self, tag, timestamp=None, precision=3, header=False):
    """Append a field with a TZTimestamp value, derived from local time.

    :param tag: Integer or string FIX tag number.
    :param timestamp: Time value, see below.
    :param precision: Number of decimal places: 0, 3 (ms) or 6 (us).
    :param header: Append to FIX header if True; default to body.

    The `timestamp` value should be a local datetime, such as created
    by datetime.datetime.now(); a float, being the number of seconds
    since midnight 1 Jan 1970 UTC, such as returned by time.time();
    or, None, in which case datetime.datetime.now() is used to get
    the current local time.

    Precision values other than zero (seconds), 3 (milliseconds),
    or 6 (microseconds) will raise an exception.  Note that prior
    to FIX 5.0, only values of 0 or 3 comply with the standard.
    """
    # Get float offset from Unix epoch.
    if timestamp is None:
        now = time.time()
    elif type(timestamp) is float:
        now = timestamp
    else:
        # Assume a datetime: seconds-since-epoch plus the sub-second part.
        now = time.mktime(timestamp.timetuple()) + (timestamp.microsecond * 1e-6)
    # Get offset of local timezone east of UTC.
    utc = datetime.datetime.utcfromtimestamp(now)
    local = datetime.datetime.fromtimestamp(now)
    td = local - utc
    # Whole minutes east of UTC (FIX TZ offsets carry minute resolution).
    offset = int(((td.days * 86400) + td.seconds) / 60)
    s = local.strftime("%Y%m%d-%H:%M:%S")
    # NOTE(review): on Python 3 these are float divisions fed to %u, which
    # truncates toward zero -- confirm this matches the intended rounding.
    if precision == 3:
        s += ".%03u" % (local.microsecond / 1000)
    elif precision == 6:
        s += ".%06u" % local.microsecond
    elif precision != 0:
        raise ValueError("Precision (%u) should be one of "
                         "0, 3 or 6 digits" % precision)
    s += self._tz_offset_string(offset)
    return self.append_pair(tag, s, header=header)
def createConnection(self):
    """Return a CardConnection to the Card object."""
    target = None
    if isinstance(self.reader, Reader):
        target = self.reader
    elif type(self.reader) == str:
        # Match the configured reader name against the system readers.
        for candidate in readers():
            if self.reader == str(candidate):
                target = candidate
    if target:
        return target.createConnection()
    # raise CardConnectionException(
    #     'not a valid reader: ' + str(self.reader))
    return None
def securitycli():
    """Entry point for the runner defined in setup.py.

    Parses command line arguments, sets up the mozilla logger and
    dispatches to the requested action: listing test groups or tests,
    dropping into an IPython shell, or running the selected test groups
    in stingray or phone mode.
    """
    parser = argparse.ArgumentParser(description="Runner for security test suite")
    parser.add_argument("-l", "--list-test-groups", action="store_true",
                        help="List all logical test groups")
    parser.add_argument("-a", "--list-all-tests", action="store_true",
                        help="List all tests")
    parser.add_argument("-i", "--include", metavar="GROUP", action="append",
                        default=[],
                        help="Only include specified group(s) in run, include several "
                             "groups by repeating flag")
    parser.add_argument("--version", action="store", dest="version",
                        help="B2G version")
    parser.add_argument("--ipython", dest="ipython", action="store_true",
                        help="drop to ipython session")
    parser.add_argument('-H', '--host',
                        help='Hostname or ip for target device',
                        action='store', default='localhost')
    parser.add_argument('-P', '--port',
                        help='Port for target device',
                        action='store', default=2828)
    parser.add_argument('-m', '--mode',
                        help='Test mode (stingray, phone) default (phone)',
                        action='store', default='phone')
    parser.add_argument("-v", dest="verbose", action="store_true",
                        help="Verbose output")
    # add specialized mozilla logger options
    commandline.add_logging_group(parser)
    args = parser.parse_args()
    # set up mozilla logger
    logger = commandline.setup_logging("securitysuite", vars(args), {"raw": sys.stdout})
    try:
        if args.list_test_groups:
            for group in ExtraTest.group_list(args.mode):
                # print() form is valid on both Python 2 and 3; the
                # previous bare print statements were Python-2-only.
                print(group)
        elif args.list_all_tests:
            for test in ExtraTest.test_list(args.mode):
                print("%s.%s" % (test.group, test.__name__))
        elif args.ipython:
            from IPython import embed
            embed()
        elif args.mode == 'stingray':
            logger.debug("security cli runnng with args %s" % args)
            ExtraTest.run_groups(args.include, version=args.version,
                                 host=args.host, port=int(args.port),
                                 mode=args.mode)
        else:
            logger.debug("security cli runnng with args %s" % args)
            wait_for_adb_device()
            if not adb_has_root():
                logger.warning("adb has no root. Results will be incomplete.")
            ExtraTest.run_groups(args.include, version=args.version)
    except:  # noqa: E722 -- deliberately log every failure before re-raising
        logger.critical(traceback.format_exc())
        raise
def command(self, cmd_code, cmd_data, timeout):
    """Send a host command and return the chip response.

    Builds a host frame (0xD4, cmd_code, data) wrapped in a CCID
    pseudo-APDU, exchanges it over the transport, validates the reply
    framing and status, and returns the response payload.

    Raises IOError(EIO) on a short, mismatched or error response.
    """
    log.log(logging.DEBUG - 1, self.CMD[cmd_code] + " " + hexlify(cmd_data))
    payload = bytearray([0xD4, cmd_code]) + bytearray(cmd_data)
    apdu = bytearray([0xFF, 0x00, 0x00, 0x00, len(payload)]) + payload
    rsp = self.ccid_xfr_block(apdu, timeout)
    if not rsp or len(rsp) < 4:
        log.error("insufficient data for decoding chip response")
        raise IOError(errno.EIO, os.strerror(errno.EIO))
    # Response must echo 0xD5 and cmd_code+1.
    if rsp[0] != 0xD5 or rsp[1] != cmd_code + 1:
        log.error("received invalid chip response")
        raise IOError(errno.EIO, os.strerror(errno.EIO))
    # Trailing status word must be 0x90 0x00.
    if rsp[-2] != 0x90 or rsp[-1] != 0x00:
        log.error("received pseudo apdu with error status")
        raise IOError(errno.EIO, os.strerror(errno.EIO))
    return rsp[2:-2]
def _parse_var_int_components(buf, signed):
    """Parses a ``VarInt`` or ``VarUInt`` field from a file-like object.

    Returns a ``(sign, value)`` tuple, where ``sign`` is 1 or -1 and
    ``value`` is the accumulated magnitude.
    """
    value = 0
    sign = 1
    while True:
        ch = buf.read(1)
        if ch == '':
            # EOF before the terminating octet was seen.
            raise IonException('Variable integer under-run')
        octet = ord(ch)
        if signed:
            # The first octet of a signed VarInt carries the sign bit and
            # a reduced-width magnitude; subsequent octets are unsigned.
            if octet & _VAR_INT_SIGN_MASK:
                sign = -1
            value = octet & _VAR_INT_SIGN_VALUE_MASK
            signed = False
        else:
            # Accumulate 7 value bits per octet, big-endian.
            value <<= _VAR_INT_VALUE_BITS
            value |= octet & _VAR_INT_VALUE_MASK
        # The high (signal) bit marks the final octet of the field.
        if octet & _VAR_INT_SIGNAL_MASK:
            break
    return sign, value
def _on_motion ( self , event ) :
"""Drag around label if visible .""" | if not self . _visual_drag . winfo_ismapped ( ) :
return
if self . _drag_cols and self . _dragged_col is not None :
self . _drag_col ( event )
elif self . _drag_rows and self . _dragged_row is not None :
self . _drag_row ( event ) |
def direct_evidences(self):
    """Fetch the distinct direct-evidence values for gene-disease rows.

    :return: All available direct evidences for gene disease correlations
    :rtype: list
    """
    evidence_column = distinct(models.GeneDisease.direct_evidence)
    return self.session.query(evidence_column).all()
def get_next_create_state(self, state, ret):
    """Return the next create state from the previous state and result.

    On failure (``ret`` falsy) the state is retried unchanged; on success
    the state advances, except at the terminal prepare-done state.
    """
    if not ret:
        return state
    if state == fw_const.FABRIC_PREPARE_DONE_STATE:
        return state
    return state + 1
def reissueMissingJobs(self, killAfterNTimesMissing=3):
    """Check all the current job ids are in the list of currently running batch system jobs.

    If a job is missing, we mark it as so, if it is missing for a number of runs of
    this function (say 10).. then we try deleting the job (though its probably lost), we wait
    then we pass the job to processFinishedJob.

    :param killAfterNTimesMissing: number of consecutive observations of a
        job as missing before it is killed.
    :return: True if no jobs are currently recorded as missing.
    """
    runningJobs = set(self.batchSystem.getIssuedBatchJobIDs())
    jobBatchSystemIDsSet = set(list(self.jobBatchSystemIDToIssuedJob.keys()))
    # Clean up the reissueMissingJobs_missingHash hash, getting rid of jobs that have turned up
    missingJobIDsSet = set(list(self.reissueMissingJobs_missingHash.keys()))
    for jobBatchSystemID in missingJobIDsSet.difference(jobBatchSystemIDsSet):
        self.reissueMissingJobs_missingHash.pop(jobBatchSystemID)
        logger.warn("Batch system id: %s is no longer missing", str(jobBatchSystemID))
    # Assert checks we have no unexpected jobs running
    assert runningJobs.issubset(jobBatchSystemIDsSet)
    jobsToKill = []
    # Every issued job that the batch system no longer reports is counted
    # as missing; once it reaches the threshold it is killed.
    for jobBatchSystemID in set(jobBatchSystemIDsSet.difference(runningJobs)):
        jobStoreID = self.jobBatchSystemIDToIssuedJob[jobBatchSystemID].jobStoreID
        if jobBatchSystemID in self.reissueMissingJobs_missingHash:
            self.reissueMissingJobs_missingHash[jobBatchSystemID] += 1
        else:
            self.reissueMissingJobs_missingHash[jobBatchSystemID] = 1
        timesMissing = self.reissueMissingJobs_missingHash[jobBatchSystemID]
        logger.warn("Job store ID %s with batch system id %s is missing for the %i time", jobStoreID, str(jobBatchSystemID), timesMissing)
        if self.toilMetrics:
            self.toilMetrics.logMissingJob()
        if timesMissing == killAfterNTimesMissing:
            # Give up on the job: drop it from the missing hash and kill it.
            self.reissueMissingJobs_missingHash.pop(jobBatchSystemID)
            jobsToKill.append(jobBatchSystemID)
    self.killJobs(jobsToKill)
    return len(self.reissueMissingJobs_missingHash) == 0
def load_module(filename):
    """Load a module from an explicit file path anywhere in the system.

    Does not depend on or modify sys.path.

    :param filename: path to a ``.py`` file.
    :return: the loaded module object.
    :raises ImportError: if no import spec can be created for the file.
    """
    # Local import: the ``imp`` module previously used here was deprecated
    # since Python 3.4 and removed in Python 3.12.
    import importlib.util
    name = os.path.splitext(os.path.basename(filename))[0]
    spec = importlib.util.spec_from_file_location(name, filename)
    if spec is None or spec.loader is None:
        raise ImportError("cannot load module from %r" % filename)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def run_summaries(sess, merged_summaries, summary_writer, epoch, feed, tens):
    """Evaluate summaries and a target tensor on the validation feed.

    Runs ``merged_summaries`` together with ``tens`` and records the
    resulting summary string under ``epoch``.  If the summary op cannot
    be evaluated with this feed (``tf.errors.InvalidArgumentError``),
    only ``tens`` is evaluated and no summary is written.

    Parameters
    ----------
    sess : tf.Session
        Tensorflow session object.
    merged_summaries : tf obj
        Tensorflow merged summaries obj.
    summary_writer : tf.summary.FileWriter
        Tensorflow summary writer obj.
    epoch : int
        Current training epoch.
    feed : dict
        Validation feed dict.
    tens : tf.Tensor
        Tensor to display and evaluate during training.  Can be
        self.accuracy for SupervisedModel or self.cost for
        UnsupervisedModel.

    Returns
    -------
    err : float, mean error over the validation set.
    """
    try:
        summary_str, out = sess.run([merged_summaries, tens], feed_dict=feed)
        summary_writer.add_summary(summary_str, epoch)
    except tf.errors.InvalidArgumentError:
        # summary op incompatible with this feed: evaluate the tensor alone
        out = sess.run(tens, feed_dict=feed)
    return out
def load_django_(self, query: "django query") -> "Ds":
    """Return a new DataSwim instance built from a Django ORM query.

    :param query: django query from a model
    :type query: django query
    :return: a dataswim instance with data from a django query
    :rtype: Ds

    :example: ``ds2 = ds.load_django_(Mymodel.objects.all())``
    """
    try:
        # convert the queryset to a dataframe, then clone into a new
        # instance; any failure is routed through the error handler
        frame = self._load_django(query)
        return self.clone_(frame)
    except Exception as e:
        self.err(e, "Can not load data from query")
def _get_imported_module ( self , module_name ) :
"""try to get imported module reference by its name""" | # if imported module on module _ set add to list
imp_mod = self . by_name . get ( module_name )
if imp_mod :
return imp_mod
# last part of import section might not be a module
# remove last section
no_obj = module_name . rsplit ( '.' , 1 ) [ 0 ]
imp_mod2 = self . by_name . get ( no_obj )
if imp_mod2 :
return imp_mod2
# special case for _ _ init _ _
if module_name in self . pkgs :
pkg_name = module_name + ".__init__"
return self . by_name [ pkg_name ]
if no_obj in self . pkgs :
pkg_name = no_obj + ".__init__"
return self . by_name [ pkg_name ] |
def register_condition(self, *args, **kwds):
    """Register a condition, directly or as a parametrized decorator.

    Usage:
        @profile.register_condition
        @condition
        def myCondition():
            return 123
        # or
        @profile.register_condition(name='my_condition')
        @condition
        def myCondition():
            return 123
    """
    # bare decorator form: exactly one positional callable, no keywords
    direct_call = len(args) == 1 and not kwds and callable(args[0])
    if direct_call:
        return self._add_condition(args[0])
    # parametrized form: defer registration until the function arrives
    return partial(self._add_condition, *args, **kwds)
def approximate_size(size, a_kilobyte_is_1024_bytes=True):
    '''Convert a file size to human-readable form.

    Humansize.py from Dive into Python3
    Mark Pilgrim - http://www.diveintopython3.net/
    Copyright (c) 2009, Mark Pilgrim, All rights reserved.

    Keyword arguments:
    size -- file size in bytes
    a_kilobyte_is_1024_bytes -- if True (default), use multiples of 1024
                                if False, use multiples of 1000

    Returns: string
    '''
    value = float(size)
    if value < 0:
        raise ValueError('number must be non-negative')
    step = 1024 if a_kilobyte_is_1024_bytes else 1000
    # walk up the suffix ladder, dividing once per step; the first scale
    # at which the value drops below the multiple is the one reported
    for suffix in SUFFIXES[step]:
        value /= step
        if value < step:
            return '{0:.1f}{1}'.format(value, suffix)
    raise ValueError('number too large')
def get_parser():
    """Build and return the command line argument parser for this script."""
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
    parser = ArgumentParser(
        description=__doc__,
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-m",
        "--model",
        dest="model",
        help="where is the model folder (with a info.yml)?",
        metavar="FOLDER",
        # validate the folder immediately so errors surface at parse time
        type=lambda value: utils.is_valid_folder(parser, value),
        default=utils.default_model(),
    )
    return parser
def parse_skewer_log(self, f):
    """Parse a skewer log file and record per-sample trimming stats.

    Extracts the input/paired fastq names, the read counts and the
    trimmed read-length distribution, then registers the results under
    the cleaned sample name for each fastq found.

    :param f: MultiQC file dict with keys ``f`` (iterable of log lines),
        ``root`` and ``fn``.
    """
    fh = f['f']
    # raw strings so regex escapes are not mangled by the str parser
    regexes = {
        'fq1': r"Input file:\s+(.+)",
        'fq2': r"Paired file:\s+(.+)",
        'r_processed': r"(\d+) read|reads pairs? processed",
        'r_short_filtered': r"(\d+) \(\s*\d+.\d+%\) short read",
        'r_empty_filtered': r"(\d+) \(\s*\d+.\d+%\) empty read",
        'r_avail': r"(\d+) \(\s*\d+.\d+%\) read",
        'r_trimmed': r"(\d+) \(\s*\d+.\d+%\) trimmed read",
        'r_untrimmed': r"(\d+) \(\s*\d+.\d+%\) untrimmed read",
    }
    regex_hist = r"\s?(\d+)\s+(\d+)\s+(\d+.\d+)%"
    data = dict()
    for k, v in regexes.items():
        data[k] = 0
    data['fq1'] = None
    data['fq2'] = None
    readlen_dist = OrderedDict()
    for l in fh:
        for k, r in regexes.items():
            match = re.search(r, l)
            if match:
                data[k] = match.group(1).replace(',', '')
        # histogram rows: "<length> <count> <pct>%"
        match = re.search(regex_hist, l)
        if match:
            read_length = int(match.group(1))
            pct_at_rl = float(match.group(3))
            readlen_dist[read_length] = pct_at_rl
    if data['fq1'] is not None:
        s_name = self.clean_s_name(data['fq1'], f['root'])
        if s_name in self.skewer_readlen_dist:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.add_skewer_data(s_name, data, f)
        self.skewer_readlen_dist[s_name] = readlen_dist
    if data['fq2'] is not None:
        # BUG FIX: the R2 sample name must come from the paired file
        # (fq2); previously fq1 was reused here, silently overwriting
        # the R1 entry instead of adding a second sample.
        s_name = self.clean_s_name(data['fq2'], f['root'])
        if s_name in self.skewer_readlen_dist:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.add_skewer_data(s_name, data, f)
        self.skewer_readlen_dist[s_name] = readlen_dist
def merge_ownership_periods(mappings):
    """Adjust OwnershipPeriod lists so consecutive periods have no gaps.

    Given a dict whose values are lists of OwnershipPeriod objects,
    returns a dict with the same keys where each value's periods are
    ordered chronologically and every period's end is pushed forward to
    the start of the following period; the last period's end becomes the
    max Timestamp.
    """
    merged = {}
    for key, periods in mappings.items():
        ordered = sorted(periods)
        # sentinel whose start is the max timestamp supplies the end
        # date for the genuinely last period
        ordered.append(OwnershipPeriod(
            pd.Timestamp.max.tz_localize('utc'),
            None,
            None,
            None,
        ))
        merged[key] = tuple(
            OwnershipPeriod(current.start, nxt.start, current.sid, current.value)
            for current, nxt in zip(ordered, ordered[1:])
        )
    return merged
def runExperiment():
    """Experiment 1: error rate as a function of training sequence count.

    Repeats runSingleExperiment for each training-set size, averages the
    hit / miss / false-positive rates over the repetitions and saves the
    resulting plots.

    :return:
    """
    trainSeqN = [5, 10, 20, 50, 100, 200]
    rptPerCondition = 20
    shape = (len(trainSeqN), rptPerCondition)
    correctRateAll = np.zeros(shape)
    missRateAll = np.zeros(shape)
    fpRateAll = np.zeros(shape)
    for i, numTrainSequence in enumerate(trainSeqN):
        for rpt in xrange(rptPerCondition):
            correctRate, missRate, fpRate = runSingleExperiment(numTrainSequence=numTrainSequence)
            correctRateAll[i, rpt] = correctRate
            missRateAll[i, rpt] = missRate
            fpRateAll[i, rpt] = fpRate
    plt.figure()
    # one subplot per averaged rate, all on a log x-axis
    panels = [
        (1, correctRateAll, ' Hit Rate - Best Match (%)'),
        (2, missRateAll, ' Miss Rate (%)'),
        (3, fpRateAll, ' False Positive Rate (%)'),
    ]
    for position, rates, ylabel in panels:
        plt.subplot(2, 2, position)
        plt.semilogx(trainSeqN, 100 * np.mean(rates, 1), '-*')
        plt.xlabel(' Training Sequence Number')
        plt.ylabel(ylabel)
    plt.savefig('result/ReberSequence_HMMperformance.pdf')
    plt.show()
def get_tag_embs(self, tag_dims):
    """Draw a random initial embedding matrix for tags.

    Parameters
    ----------
    tag_dims : int
        Size of each tag vector.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(tag_size, tag_dims)`` with float32 entries
        sampled from a standard normal distribution.
    """
    embeddings = np.random.randn(self.tag_size, tag_dims)
    return embeddings.astype(np.float32)
def hss(self):
    """Doolittle (Heidke) Skill Score: 2(ad-bc) / ((a+b)(b+d) + (a+c)(c+d))."""
    # unpack the 2x2 contingency table
    a = self.table[0, 0]
    b = self.table[0, 1]
    c = self.table[1, 0]
    d = self.table[1, 1]
    numerator = 2 * (a * d - b * c)
    denominator = (a + b) * (b + d) + (a + c) * (c + d)
    return numerator / denominator
def replace(self, p_todos):
    """Replace the whole todo list with the todo objects in ``p_todos``."""
    # wipe current contents before loading the replacements
    self.erase()
    self.add_todos(p_todos)
    # the list now differs from what is on disk
    self.dirty = True
def t_asmcomment_NEWLINE(self, t):
    r'\r?\n'
    # NOTE: the docstring above IS the PLY token regex -- it must stay
    # exactly as written; changing it changes the lexer's behavior.
    # A newline ends the asm-comment state: count the line, then drop the
    # state on top of the stack to return to whatever state preceded it.
    t.lexer.lineno += 1
    t.lexer.pop_state()
    return t
def tangent_bundle(self):
    """The tangent bundle associated with `domain` using `partition`.

    The tangent bundle of a space ``X`` of functions ``R^d --> F`` can
    be interpreted as the space of vector-valued functions
    ``R^d --> F^d``, identified here with the power space ``X^d``.
    """
    # a 0-dimensional space has a trivial (empty) tangent bundle
    if self.ndim == 0:
        return ProductSpace(field=self.field)
    return ProductSpace(self, self.ndim)
def OnDeleteCols(self, event):
    """Delete columns from all tables of the grid."""
    bbox = self.grid.selection.get_bbox()
    no_selection = bbox is None or bbox[1][1] is None
    if no_selection:
        # no column selection: delete a single column at the cursor
        del_point = self.grid.actions.cursor[1]
        no_cols = 1
    else:
        # delete the columns covered by the selection bounding box
        del_point = bbox[0][1]
        no_cols = self._get_no_rowscols(bbox)[1]
    # group the deletion so it undoes as one step
    with undo.group(_("Delete columns")):
        self.grid.actions.delete_cols(del_point, no_cols)
    self.grid.GetTable().ResetView()
    # refresh zoom so default cell sizes are recomputed
    self.grid.actions.zoom()
    event.Skip()
def issues(self, **kwargs):
    """List issues related to this milestone.

    Args:
        all (bool): If True, return all the items, without pagination
        per_page (int): Number of items to retrieve per request
        page (int): ID of the page to return (starts with page 1)
        as_list (bool): If set to False and no pagination option is
            defined, return a generator instead of a list
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabListError: If the list could not be retrieved

    Returns:
        RESTObjectList: The list of issues
    """
    path = '%s/%s/issues' % (self.manager.path, self.get_id())
    issue_manager = ProjectIssueManager(self.manager.gitlab,
                                        parent=self.manager._parent)
    data_list = self.manager.gitlab.http_list(path, as_list=False, **kwargs)
    # FIXME(gpocentek): the computed manager path is not correct
    return RESTObjectList(issue_manager, ProjectIssue, data_list)
def _check_compatibility ( self ) :
"""Make sure the next objects to be tallied are compatible with the
stored trace .""" | stored_descr = self . _file_trace_description ( )
try :
for k , v in self . _model_trace_description ( ) :
assert ( stored_descr [ k ] [ 0 ] == v [ 0 ] )
except :
raise ValueError ( "The objects to tally are incompatible with the objects stored in the file." ) |
def show(self, obj):
    """Render ``obj`` and display it using the active GUI backend.

    In interactive mode the plot objects are returned directly without
    calling ``plt.show``.  Otherwise figure auto-closing is suspended
    while every object is rendered and shown, and restored afterwards.

    :param obj: a plottable object, or a list of them.
    :return: a single plot for a single input, a list for a list input.
    """
    if self.interactive:
        if isinstance(obj, list):
            return [self.get_plot(o) for o in obj]
        return self.get_plot(obj)
    from .plot import MPLPlot
    # suspend figure auto-close while rendering; the ``finally`` restores
    # it even if get_plot or plt.show raises.  (The previous bare
    # ``except: raise`` clause was a no-op and has been removed.)
    MPLPlot._close_figures = False
    try:
        objects = obj if isinstance(obj, list) else [obj]
        plots = [self.get_plot(o) for o in objects]
        plt.show()
    finally:
        MPLPlot._close_figures = True
    return plots[0] if len(plots) == 1 else plots
def get(self, id):
    """Fetch a single run by its id, aborting with 404 when absent."""
    run = self.backend_store.get_run(id)
    if run:
        return run_model.format_response(run)
    # unknown id: signal not-found to the API layer
    return abort(http_client.NOT_FOUND,
                 message="Run {} doesn't exist".format(id))
def send_quick_chat_from_agent(self, team_only, quick_chat):
    """Forward an agent's quick chat to the game and to other python bots.

    Rate limiting is enforced by the core: at most 5 quick chats in a
    2 second period starting from the first chat, so chats can be spread
    evenly within that window; beyond that they are throttled.
    """
    # hand the chat to the game first; the core reports throttling
    rlbot_status = send_quick_chat_flat(self.game_interface, self.index,
                                        self.team, team_only, quick_chat)
    if rlbot_status == RLBotCoreStatus.QuickChatRateExceeded:
        self.logger.debug('quick chat disabled')
        return
    # mirror the chat to the python-bot queue; other languages cannot see it
    send_quick_chat(self.quick_chat_queue_holder, self.index, self.team,
                    team_only, quick_chat)
def _make_title(self):
    """Write the chart title lines into the title SVG node."""
    if not self._title:
        return
    # each title line is offset one font-height (plus spacing) further down
    line_height = self.style.title_font_size + self.spacing
    for line_no, title_line in enumerate(self._title, 1):
        text_node = self.svg.node(
            self.nodes['title'], 'text',
            class_='title plot_title',
            x=self.width / 2,
            y=line_no * line_height,
        )
        text_node.text = title_line
def load_label(self, idx):
    """Load a label image as a 1 x height x width array of class indices.

    Labels are shifted down by one so classes become 0-39 and the void
    class (0) wraps around to 255 via uint8 underflow, letting the loss
    ignore it.  The leading singleton dimension is required by the loss.
    """
    mat_path = '{}/segmentation/img_{}.mat'.format(self.nyud_dir, idx)
    label = scipy.io.loadmat(mat_path)['segmentation'].astype(np.uint8)
    # shift to 0-based classes; void underflows to 255 in uint8
    label -= 1
    # prepend the singleton dimension expected by the loss
    return label[np.newaxis, ...]
def replace_missing_value(self, str_in):  # type: (Text) -> Text
    """Map the schema's missing-value sentinel to its replacement.

    Returns ``str_in`` unchanged unless it equals the ``sentinel``
    defined in the missingValue section of the schema, in which case the
    configured ``replaceWith`` value is returned.

    :param str_in:
    :return: str_in or the missingValue replacement value
    """
    missing = self.missing_value
    # no missing-value rule configured, or no sentinel match: pass through
    if missing is not None and missing.sentinel == str_in:
        return missing.replace_with
    return str_in
def stream_skypipe_output(endpoint, name=None):
    """Generator yielding skypipe data read from ``endpoint``.

    Sends a LISTEN command for ``name`` (default: the unnamed pipe),
    yields each decoded data message until end-of-stream, and always
    sends UNLISTEN and closes the socket on the way out.
    """
    name = name or ''
    socket = ctx.socket(zmq.DEALER)
    socket.connect(endpoint)
    try:
        socket.send_multipart(sp_msg(SP_CMD_LISTEN, name))
        while True:
            msg = socket.recv_multipart()
            try:
                data = parse_skypipe_data_stream(msg, name)
            except EOFError:
                # End of stream: finish the generator.  BUG FIX: raising
                # StopIteration inside a generator is turned into a
                # RuntimeError since PEP 479 (Python 3.7+); ``return`` is
                # the correct way to end it.
                return
            if data:
                yield data
    finally:
        socket.send_multipart(sp_msg(SP_CMD_UNLISTEN, name))
        socket.close()
async def blob(self, elem=None, elem_type=None, params=None):
    """Load or dump a blob, depending on the archive direction.

    Delegates to the element type's ``blob_serialize`` when it defines
    one; otherwise falls back to the module-level blob dump/load helpers.

    :return: the serialized / deserialized blob result
    """
    if not elem_type:
        elem_type = elem.__class__
    if hasattr(elem_type, 'blob_serialize'):
        # custom serialization hook on the type itself
        if elem is None:
            elem = elem_type()
        return await elem.blob_serialize(self, elem=elem, elem_type=elem_type, params=params)
    if self.writing:
        return await x.dump_blob(self.iobj, elem=elem, elem_type=elem_type, params=params)
    return await x.load_blob(self.iobj, elem_type=elem_type, params=params, elem=elem)
def get_package_version(self, feed, group_id, artifact_id, version, show_deleted=None):
    """GetPackageVersion.

    [Preview API] Get information about a package version.

    :param str feed: Name or ID of the feed.
    :param str group_id: Group ID of the package.
    :param str artifact_id: Artifact ID of the package.
    :param str version: Version of the package.
    :param bool show_deleted: True to show information for deleted packages.
    :rtype: :class:`<Package> <azure.devops.v5_1.maven.models.Package>`
    """
    # serialize every supplied route component under its wire name
    route_values = {}
    route_params = (
        ('feed', 'feed', feed),
        ('groupId', 'group_id', group_id),
        ('artifactId', 'artifact_id', artifact_id),
        ('version', 'version', version),
    )
    for route_key, param_name, value in route_params:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    query_parameters = {}
    if show_deleted is not None:
        query_parameters['showDeleted'] = self._serialize.query('show_deleted', show_deleted, 'bool')
    response = self._send(http_method='GET',
                          location_id='180ed967-377a-4112-986b-607adb14ded4',
                          version='5.1-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('Package', response)
def find_one(self, filter=None, *args, **kwargs):
    """Get a single file from gridfs.

    All arguments to :meth:`find` are also valid arguments for
    :meth:`find_one`, although any `limit` argument will be ignored.
    Returns a single :class:`~gridfs.grid_file.GridOut`, or ``None`` if
    no matching file is found. For example::

        file = fs.find_one({"filename": "lisa.txt"})

    :Parameters:
      - `filter` (optional): a dictionary specifying
        the query to be performing OR any other type to be used as
        the value for a query for ``"_id"`` in the file collection.
      - `*args` (optional): any additional positional arguments are
        the same as the arguments to :meth:`find`.
      - `**kwargs` (optional): any additional keyword arguments
        are the same as the arguments to :meth:`find`.
    """
    # a non-mapping filter is shorthand for an _id lookup
    if filter is not None and not isinstance(filter, Mapping):
        filter = {"_id": filter}
    # first match wins; None when the cursor is empty
    return next(iter(self.find(filter, *args, **kwargs)), None)
def add_suffix(self, suffix):
    """Suffix labels with string `suffix`.

    For Series, the row labels are suffixed.
    For DataFrame, the column labels are suffixed.

    Parameters
    ----------
    suffix : str
        The string to add after each label.

    Returns
    -------
    Series or DataFrame
        New Series or DataFrame with updated labels.

    See Also
    --------
    Series.add_prefix : Prefix row labels with string `prefix`.
    DataFrame.add_prefix : Prefix column labels with string `prefix`.

    Examples
    --------
    >>> pd.Series([1, 2]).add_suffix('_item').index.tolist()
    ['0_item', '1_item']
    >>> list(pd.DataFrame({'A': [1], 'B': [2]}).add_suffix('_col'))
    ['A_col', 'B_col']
    """
    def rename_label(label):
        # append the suffix to the stringified label
        return '{}{}'.format(label, suffix)
    # rename along the info axis: index for Series, columns for DataFrame
    return self.rename(**{self._info_axis_name: rename_label})
def init_requests_cache(refresh_cache=False):
    """Install a ``requests`` response cache consulted before network calls.

    The cache lives in the per-user cache directory and entries expire
    after one week.  Used to cache data fetched from external sources in
    some checks.

    :param refresh_cache: Whether the cache should be cleared out
    """
    dirs = AppDirs("stix2-validator", "OASIS")
    # create the cache dir if it doesn't exist
    try:
        os.makedirs(dirs.user_cache_dir)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    cache_file = os.path.join(dirs.user_cache_dir,
                              'py{}cache'.format(sys.version_info[0]))
    requests_cache.install_cache(cache_name=cache_file,
                                 expire_after=datetime.timedelta(weeks=1))
    if refresh_cache:
        clear_requests_cache()
def accounts(self, id=None):
    """Return advertiser :class:`Accounts` available to the current
    access token, or a single account when ``id`` is given."""
    if id:
        return Account.load(self, id)
    return Account.all(self)
def _update_fields_with_objects(self):
    """Convert dict fields into objects, where appropriate."""
    if self.target is None:
        return
    # only photo targets are supported so far
    if self.target_type != "photo":
        raise NotImplementedError("Actions can only be assigned to Photos")
    # wrap the raw target dict in a Photo object
    self.target = Photo(self._client, self.target)
def is_get_query_with_results(results):
    """Check an Elasticsearch get response for a usable result.

    :param results: the response from Elasticsearch
    :return: truthy if the get query returned a result with fields,
        falsy otherwise (the exact short-circuit value is preserved)
    """
    if not results:
        return results
    if EsConst.FOUND not in results:
        return False
    found = results[EsConst.FOUND]
    if not found:
        return found
    return EsConst.FIELDS in results
def _service_by_name(name):
    '''Return the service info for a service by label, filename or path'''
    services = _available_services()
    name = name.lower()
    # label match
    if name in services:
        return services[name]
    for service in six.itervalues(services):
        # full path match
        if service['file_path'].lower() == name:
            return service
        # basename match (filename without extension)
        basename, ext = os.path.splitext(service['filename'])
        if basename.lower() == name:
            return service
    return False
def resolve_field_instance(cls_or_instance):
    """Return a Schema instance from a Schema class or instance.

    :param type|Schema cls_or_instance: Marshmallow Schema class or instance.
    :raises FieldInstanceResolutionError: if the argument is neither a
        FieldABC subclass nor a FieldABC instance.
    """
    if isinstance(cls_or_instance, type):
        # got a class: it must subclass FieldABC; instantiate it
        if issubclass(cls_or_instance, FieldABC):
            return cls_or_instance()
        raise FieldInstanceResolutionError
    # got an instance: it must already be a FieldABC
    if isinstance(cls_or_instance, FieldABC):
        return cls_or_instance
    raise FieldInstanceResolutionError
def _build_install_args ( options ) :
'''Build the arguments to ' python setup . py install ' on the setuptools package''' | install_args = [ ]
if options . user_install :
if sys . version_info < ( 2 , 6 ) :
log . warn ( '--user requires Python 2.6 or later' )
raise SystemExit ( 1 )
install_args . append ( '--user' )
return install_args |
def extract_init_args(instance):
    """Extract constructor argument values back out of an instance.

    Assumes member variables have the same names as the ``__init__``
    arguments, so the returned values can be used to reconstruct the
    instance when deserializing.

    :param instance: object whose ``__init__`` argument values to read
    :return: list of attribute values in ``__init__`` declaration order
    """
    cls = instance.__class__
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec is the drop-in replacement for positional args.
    arg_names = [a for a in inspect.getfullargspec(cls.__init__).args
                 if a != 'self']
    return [instance.__dict__[name] for name in arg_names]
def get_distributable(
        sender: NettingChannelEndState,
        receiver: NettingChannelEndState,
) -> TokenAmount:
    """Return the amount of tokens that can be used by the `sender`.

    The returned value is limited to a UINT256, since that is the
    representation used in the smart contracts and we cannot use a
    larger value. The limit is enforced on
    transferred_amount + locked_amount to avoid overflows. This is an
    additional security check.
    """
    _, _, transferred_amount, locked_amount = get_current_balanceproof(sender)
    # tokens the sender still holds, minus those already committed to locks
    distributable = get_balance(sender, receiver) - get_amount_locked(sender)
    # headroom before transferred + locked would overflow UINT256
    overflow_limit = max(0, UINT256_MAX - transferred_amount - locked_amount)
    return TokenAmount(min(distributable, overflow_limit))
def get(self, request, *args, **kwargs):
    """Retrieve list of service requests"""
    # service_code is mandatory and must name a configured service
    if 'service_code' not in request.GET.keys():
        return Response({'detail': _('A service code must be inserted')}, status=404)
    service_code = request.GET['service_code']
    if service_code not in SERVICES.keys():
        return Response({'detail': _('Service not found')}, status=404)
    # optional filters, validated below before use
    start_date = None
    end_date = None
    status = None
    layer = None
    # map 'open'/'closed' onto the concrete status slugs configured in STATUS
    STATUSES = {}
    for status_type in ('open', 'closed'):
        STATUSES[status_type] = [k for k, v in STATUS.items() if v == status_type]
    # date parameters must match the ISO 8601 pattern
    if 'start_date' in request.GET.keys():
        start_date = request.GET['start_date']
        if iso8601_REGEXP.match(start_date) is None:
            return Response({'detail': _('Invalid date inserted')}, status=404)
    if 'end_date' in request.GET.keys():
        end_date = request.GET['end_date']
        if iso8601_REGEXP.match(end_date) is None:
            return Response({'detail': _('Invalid date inserted')}, status=404)
    # status, when given, must be exactly 'open' or 'closed'
    if 'status' in request.GET.keys():
        if request.GET['status'] not in ('open', 'closed'):
            return Response({'detail': _('Invalid status inserted')}, status=404)
        status = request.GET['status']
    # layer is looked up by slug; unknown slugs abort with 404
    if 'layer' in request.GET.keys():
        layer = request.GET['layer']
        node_layer = get_object_or_404(Layer, slug=layer)
    service_model = MODELS[service_code]
    # vote/comment/rate requests are not listable: start from an empty queryset
    if service_code in ('vote', 'comment', 'rate'):
        self.queryset = service_model.objects.none()
    else:
        self.queryset = service_model.objects.all()
    # Filter by layer
    if layer is not None:
        self.queryset = self.queryset.filter(layer=node_layer)
    # Check of date parameters
    if start_date is not None and end_date is not None:
        self.queryset = self.queryset.filter(added__gte=start_date).filter(added__lte=end_date)
    if start_date is not None and end_date is None:
        self.queryset = self.queryset.filter(added__gte=start_date)
    if start_date is None and end_date is not None:
        self.queryset = self.queryset.filter(added__lte=end_date)
    # Check of status parameter
    if status is not None:
        # OR together every slug mapped to the requested open/closed state
        q_list = [Q(status__slug__exact=s) for s in STATUSES[status]]
        self.queryset = self.queryset.filter(reduce(operator.or_, q_list))
    return self.list(request, *args, **kwargs)
def get_metric_by_week(self, unique_identifier, metric, from_date, limit=10, **kwargs):
    """Return ``metric`` for ``unique_identifier`` segmented by week.

    Weeks start from the Monday closest to ``from_date``.

    :param unique_identifier: Unique string indetifying the object this
        metric is for
    :param metric: A unique name for the metric you want to track
    :param from_date: A python date object
    :param limit: The total number of weeks to retrive starting from
        ``from_date``
    :return: tuple of (series of Mondays, per-week results)
    """
    conn = kwargs.get("connection", None)
    first_monday = self._get_closest_week(from_date)
    metric_key_date_range = self._get_weekly_date_range(
        first_monday, datetime.timedelta(weeks=limit))
    # one Monday per week, ``limit`` weeks long
    series = [first_monday + datetime.timedelta(days=7 * week)
              for week in range(limit)]
    metric_keys = [self._get_weekly_metric_name(metric, monday)
                   for monday in series]

    def fetch(connection):
        # one hmget per week-bucket key, each asking for all metric keys
        return [connection.hmget(
                    self._get_weekly_metric_key(unique_identifier, key_date),
                    metric_keys)
                for key_date in metric_key_date_range]

    if conn is not None:
        results = fetch(conn)
    else:
        with self._analytics_backend.map() as mapped_conn:
            results = fetch(mapped_conn)
    series, results = self._parse_and_process_metrics(series, results)
    return series, results
def validate_doc(self, document: BioCDocument):
    """Validate a single document, its passages and their sentences."""
    # collect every annotation/relation at all levels so relation
    # references can be resolved document-wide
    annotations = list(document.annotations)
    annotations.extend(document.relations)
    for passage in document.passages:
        annotations.extend(passage.annotations)
        annotations.extend(passage.relations)
        for sentence in passage.sentences:
            annotations.extend(sentence.annotations)
            annotations.extend(sentence.relations)
    self.current_docid = document.id
    self.traceback.append(document)
    doc_text = self.__get_doc_text(document)
    self.__validate_ann(document.annotations, doc_text, 0)
    self.__validate_rel(annotations, document.relations, f'document {document.id}')
    for passage in document.passages:
        self.traceback.append(passage)
        passage_text = self.__get_passage_text(passage)
        self.__validate_ann(passage.annotations, passage_text, passage.offset)
        self.__validate_rel(annotations, passage.relations, f'document {document.id} --> passage {passage.offset}')
        for sentence in passage.sentences:
            self.traceback.append(sentence)
            self.__validate_ann(sentence.annotations, sentence.text, sentence.offset)
            self.__validate_rel(annotations, sentence.relations, f'document {document.id} --> sentence {sentence.offset}')
            self.traceback.pop()
        self.traceback.pop()
    self.traceback.pop()
def is_turbo_boost_enabled():
    """Check whether Turbo Boost (scaling CPU frequency beyond nominal
    frequency) is active on this system.

    @return: A bool, or None if Turbo Boost is not supported.
    """
    try:
        if os.path.exists(_TURBO_BOOST_FILE):
            # acpi-cpufreq interface: 1 means boost enabled
            boost_enabled = int(util.read_file(_TURBO_BOOST_FILE))
            if not (0 <= boost_enabled <= 1):
                raise ValueError('Invalid value {} for turbo boost activation'.format(boost_enabled))
            return boost_enabled != 0
        if os.path.exists(_TURBO_BOOST_FILE_PSTATE):
            # intel_pstate exposes the inverse flag: 1 means boost DISABLED
            boost_disabled = int(util.read_file(_TURBO_BOOST_FILE_PSTATE))
            if not (0 <= boost_disabled <= 1):
                # BUG FIX: previously formatted the undefined name
                # ``boost_enabled`` here, which raised NameError instead
                # of the intended ValueError.
                raise ValueError('Invalid value {} for turbo boost activation'.format(boost_disabled))
            return boost_disabled != 1
    except ValueError as e:
        sys.exit("Could not read turbo-boost information from kernel: {0}".format(e))
def _new_url_record(cls, request: Request) -> URLRecord:
    '''Return new empty URLRecord.'''
    # fresh record: in-progress, no attempts, top level
    record = URLRecord()
    record.url = request.url_info.url
    record.status = Status.in_progress
    record.try_count = 0
    record.level = 0
    return record
def write_processed_litho_data(filename, litho_data, litho_points):
    """Store litho data in the compressed-npz layout the class loader expects.

    The arrays are saved under the ``litho1_all_data`` and
    ``litho1_mesh_coords`` keys, which are read back when initialising
    the class.
    """
    np.savez_compressed(
        filename,
        litho1_all_data=litho_data,
        litho1_mesh_coords=litho_points,
    )
def apply_time_offset(time, years=0, months=0, days=0, hours=0):
    """Apply a specified offset to the given time array.

    This is useful for GFDL model output of instantaneous values. For
    example, 3 hourly data postprocessed to netCDF files spanning 1 year
    each will actually have time values that are offset by 3 hours, such
    that the first value is for 1 Jan 03:00 and the last value is 1 Jan
    00:00 of the subsequent year. This causes problems in xarray, e.g.
    when trying to group by month. It is resolved by manually
    subtracting off those three hours, such that the dates span from
    1 Jan 00:00 to 31 Dec 21:00 as desired.

    Parameters
    ----------
    time : xarray.DataArray representing a timeseries
    years, months, days, hours : int, optional
        The number of years, months, days, and hours, respectively, to
        offset the time array by. Positive values move the times later.

    Returns
    -------
    pandas.DatetimeIndex

    Examples
    --------
    >>> times = xr.DataArray(datetime.datetime(1899, 12, 31, 21))
    >>> apply_time_offset(times, hours=3)
    Timestamp('1900-01-01 00:00:00')
    """
    offset = pd.DateOffset(years=years, months=months, days=days, hours=hours)
    return pd.to_datetime(time.values) + offset
def resolve(self, expr, name, safe=DEFAULT_SAFE, tostr=DEFAULT_TOSTR,
            scope=DEFAULT_SCOPE, besteffort=DEFAULT_BESTEFFORT):
    """Resolve an expression with possibly a dedicated expression resolver.

    :param str name: expression resolver registered name. Default is the
        first registered expression resolver.
    :param bool safe: if True (Default), resolve in a safe context
        (without I/O).
    :param bool tostr: if True (False by default), transform the result
        into a string format.
    :param dict scope: scope execution resolution.
    :return: a string if tostr, otherwise, a python object.
    :raises: KeyError if no expression resolver has been registered or if
        name does not exist in expression resolvers.
    """
    # fall back to the default resolver when no name is given
    resolver_name = self.default if name is None else name
    resolver = self[resolver_name]
    return resolver(expr=expr, safe=safe, tostr=tostr, scope=scope,
                    besteffort=besteffort)
def encode_data_items(self, *args):
    """Encode a list of integers and strings into one concatenated string.

    - string items are encoded as-is.
    - integer items are encoded as base-64 with a ``'~'`` prefix.
    - the encoded items are concatenated with a ``'|'`` separator.

    Example:
        ``encode_data_items('abc', 123, 'xyz')`` returns ``'abc|~B7|xyz'``
    """
    def _encode_one(item):
        # Strings pass through unchanged; ints get the prefix plus their
        # base-64 form; anything else falls back to str().
        if isinstance(item, str):
            return item
        if isinstance(item, int):
            return self.INTEGER_PREFIX + self.encode_int(item)
        return str(item)

    return self.SEPARATOR.join(_encode_one(item) for item in args)
def scp_put(self, src, dst):
    """Copy src file from the local system to dst on the remote system."""
    # Batch mode, no host-key prompting, quiet except for genuine errors.
    command = ['scp', '-B', '-oStrictHostKeyChecking=no',
               '-oUserKnownHostsFile=/dev/null', '-oLogLevel=ERROR']
    if self._key is not None:
        command += ['-i', self._key]
    command.append(src)
    # Build the [user@]host:path destination spec.
    user_prefix = self._user + '@' if self._user is not None else ''
    command.append(user_prefix + self._ip + ':' + dst)
    try:
        # Output is ignored on success; stderr is captured for the error report.
        subprocess.check_output(command, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as ex:
        raise RuntimeError('scp returned exit status %d:\n%s'
                           % (ex.returncode, ex.output.strip()))
def as_p(self, show_leaf=True, current_linkable=False, class_current="active_link"):
    """Return the breadcrumb rendered as ``<p>`` markup, delegating to __do_menu."""
    rendered = self.__do_menu("as_p", show_leaf, current_linkable, class_current)
    return rendered
def open(filename, frame='unspecified'):
    """Open a segmentation image from *filename* in the given *frame*."""
    # Load the raw pixel data, then wrap it in a SegmentationImage.
    return SegmentationImage(Image.load_data(filename), frame)
def unique_files(event_list):
    """Find the unique files referenced by a list of events.

    Parameters
    ----------
    event_list : list or dcase_util.containers.MetaDataContainer
        A list containing event dicts

    Returns
    -------
    list
        Unique filenames in alphabetical order
    """
    if isinstance(event_list, dcase_util.containers.MetaDataContainer):
        # The container already tracks its unique files.
        return event_list.unique_files
    # Collect into a set (the original abused a dict as a set); events may
    # carry the path under either 'file' or the legacy 'filename' key.
    files = set()
    for event in event_list:
        if 'file' in event:
            files.add(event['file'])
        elif 'filename' in event:
            files.add(event['filename'])
    return sorted(files)
def list_timezones():
    """Return a list of all time zones known to the system.

    Enumerates the child keys of the ``tzparent`` registry handle via
    ``_winreg`` (both presumably set up at module level — defined elsewhere
    in this module).
    """
    # Comprehension replaces the manual append loop.
    return [_winreg.EnumKey(tzparent, i) for i in xrange(parentsize)]
def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False):
    """Scan sequences with this motif and save to a GFF file.

    Scan sequences from a FASTA object with this motif. Less efficient
    than using a Scanner object. By setting the cutoff to 0.0 and
    nreport to 1, the best match for every sequence will be returned.
    The output is saved to a file in GFF format.

    Parameters
    ----------
    fa : Fasta object
        Fasta object to scan.
    gfffile : str
        Filename of GFF output file.
    cutoff : float, optional
        Cutoff to use for motif scanning. This cutoff is not specifically
        optimized and the strictness will vary a lot with motif length.
    nreport : int, optional
        Maximum number of matches to report.
    scan_rc : bool, optional
        Scan the reverse complement. True by default.
    append : bool, optional
        Append to GFF file instead of overwriting it. False by default.
    """
    # Absolute score threshold interpolated between the PWM min and max score.
    threshold = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff
    pwm = self.pwm
    strandmap = {-1: "-", "-1": "-", "-": "-", "1": "+", 1: "+", "+": "+"}
    gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t"
                "motif_name \"{}\" ; motif_instance \"{}\"\n")
    mode = "a" if append else "w"
    # 'with' guarantees the file is closed even if scanning raises
    # (the original leaked the handle on exception).
    with open(gfffile, mode) as out:
        for name, seq in fa.items():
            result = pfmscan(seq.upper(), pwm, threshold, nreport, scan_rc)
            for score, pos, strand in result:
                out.write(gff_line.format(name, pos, pos + len(pwm), score,
                                          strandmap[strand], self.id,
                                          seq[pos: pos + len(pwm)]))
def process_rewards(self, rewards):
    """Clip to ``self.reward_range``, round, and change to integer type.

    Args:
        rewards: numpy array of raw (float) rewards.

    Returns:
        processed_rewards: numpy array of np.int64
    """
    low, high = self.reward_range
    # Clip to the allowed reward interval.
    clipped = np.clip(rewards, low, high)
    # Round to the nearest integer, then switch to an integral dtype.
    return np.around(clipped, decimals=0).astype(np.int64)
def _to_dict(self):
    """Return a json dictionary representing this model."""
    result = {}
    # getattr(..., None) covers both "attribute missing" and "attribute is None".
    entities = getattr(self, 'entities', None)
    if entities is not None:
        result['entities'] = [entity._to_dict() for entity in entities]
    pagination = getattr(self, 'pagination', None)
    if pagination is not None:
        result['pagination'] = pagination._to_dict()
    return result
def _populate_relationships(self, rec_curr):
    """Convert GO IDs in relationships to GO Term record objects. Populate children."""
    for rel_type, goids in rec_curr.relationship.items():
        # Replace the set of GO IDs with the actual parent record objects.
        parents = {self[goid] for goid in goids}
        rec_curr.relationship[rel_type] = parents
        # Register the reverse link on every parent record.
        for parent in parents:
            parent.relationship_rev.setdefault(rel_type, set()).add(rec_curr)
def normalize_ext(filepath):
    """Convert file extension(s) to normalized form, e.g. '.tgz' -> '.tar.gz'

    Normalized extensions are ordered in reverse order of how they should be
    processed. Also extensions are ordered in order of decreasing
    specificity/detail, e.g. zip last, then txt/bin, then model type, then
    model dimensionality.

    .TGZ => .tar.gz
    .ZIP => .zip
    .tgz => .tar.gz
    .bin.gz => .w2v.bin.gz
    .6B.zip => .6B.glove.txt.zip
    .27B.zip => .27B.glove.txt.zip
    .42B.300d.zip => .42B.300d.glove.txt.zip
    .840B.300d.zip => .840B.300d.glove.txt.zip

    FIXME: Don't do this! Stick with the original file names and let the
        text loader figure out what it is!
    TODO: use regexes to be more general (deal with .300D and .42B extensions)

    >>> normalize_ext('glove.42B.300d.zip')
    'glove.42B.300d.glove.txt.zip'
    """
    mapping = tuple(reversed((
        ('.tgz', '.tar.gz'),
        ('.bin.gz', '.w2v.bin.gz'),
        ('.6B.zip', '.6b.glove.txt.zip'),
        ('.42B.zip', '.42b.glove.txt.zip'),
        ('.27B.zip', '.27b.glove.txt.zip'),
        ('.300d.zip', '.300d.glove.txt.zip'),
    )))
    if not isinstance(filepath, str):
        # Sequences of paths are normalized element-wise.
        return [normalize_ext(fp) for fp in filepath]
    # Guard against empty input before indexing filepath[0]
    # (the original raised IndexError on '').
    if filepath and (filepath[0] == '~' or '$' in filepath):
        filepath = expand_filepath(filepath)
    fplower = filepath.lower()
    for ext, newext in mapping:
        # Anchor the (escaped) extension at the end of a dotted filename.
        pattern = r'^[.]?([^.]*)\.([^.]{1,10})*' + ext.lower().replace('.', r'\.') + r'$'
        if re.match(pattern, fplower) and not fplower.endswith(newext):
            filepath = filepath[:-len(ext)] + newext
    return filepath
def matrix(fasta_path: 'path to tictax annotated fasta input', scafstats_path: 'path to BBMap scaftstats file'):
    '''Generate taxonomic count matrix from tictax classified contigs'''
    # Parse the annotated contigs lazily, build the matrix, stream it as CSV.
    contigs = SeqIO.parse(fasta_path, 'fasta')
    tictax.matrix(contigs, scafstats_path).to_csv(sys.stdout)
def _printSourceCode(self, hrlinetop=True):
    """Print the serialized (turtle) source of the currently selected entity.

    Does nothing at the ontology level (no current entity selected).

    :param hrlinetop: when True, print a horizontal rule before the header.
    """
    if not self.currentEntity:  # ==> ontology level
        return
    # NOTE(review): the original also fetched self.currentEntity['object']
    # into a local that was never used; removed.
    if hrlinetop:
        self._print("----------------")
    self._print("Source:", "IMPORTANT")
    self.do_serialize("turtle")
    self._print("----------------")
    return
def on_recv(self, cf):
    """Advance the state machine; must be called for every received CAN frame."""
    payload = bytes(cf.data)
    if len(payload) < 2:
        return
    # With extended addressing the first byte is the address byte, so the
    # PCI byte shifts one position to the right.
    offset = 0
    if self.extended_rx_addr is not None:
        offset = 1
        if len(payload) < 3:
            return
        if six.indexbytes(payload, 0) != self.extended_rx_addr:
            return
    pci_type = six.indexbytes(payload, offset) & 0xf0
    frame = payload[offset:]
    # Flow-control frames touch transmit state; all other PCI types touch
    # receive state, hence the different mutexes.
    if pci_type == N_PCI_FC:
        with self.tx_mutex:
            self._recv_fc(frame)
    elif pci_type == N_PCI_SF:
        with self.rx_mutex:
            self._recv_sf(frame)
    elif pci_type == N_PCI_FF:
        with self.rx_mutex:
            self._recv_ff(frame)
    elif pci_type == N_PCI_CF:
        with self.rx_mutex:
            self._recv_cf(frame)
def add_diagnostic(self, name, value=None):
    """Create a new diagnostic variable called ``name`` for this process
    and initialize it with the given ``value``.

    The quantity is accessible in two ways:

    * as a process attribute, i.e. ``proc.name``
    * as a member of the diagnostics dictionary,
      i.e. ``proc.diagnostics['name']``

    Use the attribute to set values, e.g. ``proc.name = value``.

    :param str name: name of diagnostic quantity to be initialized
    :param array value: initial value for quantity [default: None]

    :Example:

        Add a diagnostic CO2 variable to an energy balance model::

            >>> import climlab
            >>> model = climlab.EBM()
            >>> model.add_diagnostic('CO2', 280.)
            >>> model.CO2
            280
    """
    # Record the name, then expose the value as an ordinary attribute.
    self._diag_vars.append(name)
    setattr(self, name, value)
def flavor_extra_delete(request, flavor_id, keys):
    """Unset the flavor extra spec keys."""
    # Fetch the flavor through the nova client, then drop the given keys.
    client = _nova.novaclient(request)
    return client.flavors.get(flavor_id).unset_keys(keys)
def parse_params(self, ngpu=1, **kwargs):
    """Take in a dictionary of parameters and apply attack-specific checks
    before saving them as attributes.

    Attack-specific parameters:

    :param ngpu: (required int) the number of GPUs available.
    :param kwargs: A dictionary of parameters for MadryEtAl attack.
    """
    # Let the single-GPU base class validate the shared parameters first.
    ok = super(MadryEtAlMultiGPU, self).parse_params(**kwargs)
    self.ngpu = ngpu
    return ok
def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer:
    '''Create a :py:class:`Buffer` object.

    Args:
        data (bytes): Content of the new buffer.

    Keyword Args:
        reserve (int): The number of bytes to reserve.
        dynamic (bool): Treat buffer as dynamic.

    Returns:
        :py:class:`Buffer` object
    '''
    # A string reserve size (e.g. '4KB') is converted to a byte count.
    # type() rather than isinstance() deliberately excludes str subclasses,
    # matching the original check.
    if type(reserve) is str:
        reserve = mgl.strsize(reserve)
    buf = Buffer.__new__(Buffer)
    buf.ctx = self
    buf.extra = None
    buf._dynamic = dynamic
    buf.mglo, buf._size, buf._glo = self.mglo.buffer(data, reserve, dynamic)
    return buf
def close_idle_connections(self, pool_id=None):
    """Close idle connections to mongo.

    :param pool_id: when given (truthy), close only that pool; otherwise
        close every known pool.
    :raises ProgrammingError: if ``pool_id`` does not name a known pool.
    """
    # Nothing to do before any pool has been created.
    if not hasattr(self, '_pools'):
        return
    if pool_id:
        if pool_id not in self._pools:
            raise ProgrammingError("pool %r does not exist" % pool_id)
        self._pools[pool_id].close()
    else:
        # Only the pools are needed, not their ids (the original iterated
        # .items() and discarded the key).
        for pool in self._pools.values():
            pool.close()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.